wakeupmh committed · Commit 2269229 · 1 Parent(s): e3b3253
README.md CHANGED
@@ -8,7 +8,7 @@ sdk_version: 1.42.1
  app_file: app.py
  pinned: false
  license: mit
- short_description: '"explorando a riqueza das neurodivergências"'
+ short_description: 'Explorando a riqueza das neurodivergências'
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,6 +1,5 @@
  import streamlit as st
  import logging
- import asyncio
  from services.model_handler import ModelHandler

  # Configure logging
@@ -23,45 +22,29 @@ class AutismResearchApp:
          Pergunte o que quiser e eu vou analisar os últimos artigos científicos e fornecer uma resposta baseada em evidências.
          """)

-     async def run(self):
+     def run(self):
          """Run the main application loop"""
-
          self._setup_streamlit()

          # Initialize session state for papers
          if 'papers' not in st.session_state:
              st.session_state.papers = []

-         # Carregar modelos assincronamente
-         with st.status("Carregando modelos...") as status:
-             status.write("🔄 Inicializando modelos de linguagem...")
-             await self.model_handler._load_models_async()
-             status.write("✅ Modelos carregados com sucesso!")
-
          # Get user query
          col1, col2 = st.columns(2, vertical_alignment="bottom", gap="small")

          query = col1.text_input("O que você precisa saber?")
-
          if col2.button("Enviar"):
-             if not query:
-                 st.error("Por favor, digite uma pergunta.")
-                 return
-
              # Show status while processing
              with st.status("Processando sua Pergunta...") as status:
-                 status.write("🔍 Buscando informações relevantes...")
-                 status.write("📚 Analisando dados...")
+                 status.write("🔍 Buscando papers de pesquisa relevantes...")
+                 status.write("📚 Analisando papers de pesquisa...")
                  status.write("✍️ Gerando resposta...")
-
-                 # Sempre usar o modelo, nunca a resposta padrão
-                 self.model_handler.force_default_response = False
-
-                 answer = await self.model_handler.generate_answer_async(query)
+                 answer = self.model_handler.generate_answer(query)

                  status.write("✨ Resposta gerada! Exibindo resultados...")

-                 st.success("✅ Resposta gerada com sucesso!")
+                 st.success("✅ Resposta gerada com base nos artigos de pesquisa encontrados.")


                  st.markdown("### Resposta")
@@ -69,7 +52,7 @@ class AutismResearchApp:

  def main():
      app = AutismResearchApp()
-     asyncio.run(app.run())
+     app.run()

  if __name__ == "__main__":
      main()
requirements.txt CHANGED
@@ -1,13 +1,10 @@
  transformers>=4.36.2
  streamlit>=1.29.0
  --extra-index-url https://download.pytorch.org/whl/cpu
- torch>=2.1.0
  accelerate>=0.26.0
  arxiv>=1.4.7
  python-dotenv>=1.0.0
- agno==1.1.5
+ agno==1.0.6
+ ollama>=0.4.7
  pypdf>=3.11.1
- watchdog>=2.3.1
- sentencepiece>=0.1.99
- tenacity>=8.2.2
- asyncio
+ watchdog>=2.3.1
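Note on the new ollama dependency: it is only the Python client. The agno Ollama(id="llama3.2:1b") models configured in services/model_handler.py assume a reachable Ollama server that already has that model pulled (for example, ollama pull llama3.2:1b). A minimal connectivity check, sketched under the assumption of a default local server and the 0.4+ client API:

    # Hypothetical smoke test for the Ollama dependency; not part of this commit.
    import ollama

    print(ollama.list())  # the llama3.2:1b model should appear among the local models
    reply = ollama.chat(
        model="llama3.2:1b",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(reply.message.content)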
services/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (179 Bytes).
 
services/__pycache__/model_handler.cpython-311.pyc ADDED
Binary file (6.53 kB).
 
services/__pycache__/research_fetcher.cpython-311.pyc ADDED
Binary file (17.9 kB).
 
services/model_handler.py CHANGED
@@ -1,844 +1,128 @@
1
  import logging
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
  import streamlit as st
4
  from agno.agent import Agent
 
5
  from agno.tools.arxiv import ArxivTools
6
  from agno.tools.pubmed import PubmedTools
7
- from agno.models.base import Model
8
- from tenacity import retry, stop_after_attempt, wait_exponential
9
- import time
10
- import datetime
11
- import os
12
- from typing import Tuple, Optional, Dict, Any, List
13
 
14
- # Configuração de logging
15
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
16
- logger = logging.getLogger(__name__)
17
-
18
- # Configurações dos modelos
19
- MODEL_CONFIG = {
20
- "translator": {
21
- "primary": "facebook/nllb-200-distilled-600M",
22
- "fallback": "google/flan-t5-base"
23
- },
24
- "researcher": {
25
- "primary": "google/flan-t5-large",
26
- "fallback": "google/flan-t5-base"
27
- },
28
- "presenter": {
29
- "primary": "google/flan-t5-base",
30
- "fallback": "google/flan-t5-small"
31
- }
32
- }
33
-
34
- # Simple Response class to wrap the model output
35
- class Response:
36
- def __init__(self, content):
37
- # Ensure content is a string and not empty
38
- if content is None:
39
- content = ""
40
- if not isinstance(content, str):
41
- content = str(content)
42
-
43
- # Store the content
44
- self.content = content
45
-
46
- # Add tool_calls attribute with default empty list
47
- self.tool_calls = []
48
-
49
- # Add other attributes that might be needed
50
- self.audio = None
51
- self.images = []
52
- self.citations = []
53
- self.metadata = {}
54
- self.finish_reason = "stop"
55
- self.usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
56
-
57
- # Add timestamp attributes
58
- current_time = time.time()
59
- self.created_at = int(current_time) # Convert to integer
60
- self.created = int(current_time)
61
- self.timestamp = datetime.datetime.now().isoformat()
62
-
63
- # Add model info attributes
64
- self.id = "local-model-response"
65
- self.model = "local-huggingface"
66
- self.object = "chat.completion"
67
- self.choices = [{"index": 0, "message": {"role": "assistant", "content": content}, "finish_reason": "stop"}]
68
-
69
- # Add additional attributes that might be needed
70
- self.system_fingerprint = ""
71
- self.is_truncated = False
72
- self.role = "assistant"
73
-
74
- def __str__(self):
75
- return self.content if self.content else ""
76
-
77
- def __repr__(self):
78
- return f"Response(content='{self.content[:50]}{'...' if len(self.content) > 50 else ''}')"
79
-
80
- # Personalizada classe para modelos locais
81
- class LocalHuggingFaceModel(Model):
82
- def __init__(self, model, tokenizer, model_id="local-huggingface", max_length=512):
83
- super().__init__(id=model_id)
84
- self.model = model
85
- self.tokenizer = tokenizer
86
- self.max_length = max_length
87
- self.model_name = model_id
88
-
89
- async def ainvoke(self, prompt: str, **kwargs) -> str:
90
- """Async invoke method"""
91
- try:
92
- logging.info(f"[{self.model_name}] ainvoke called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
93
- # Não usar await com o método invoke que é síncrono
94
- return self.invoke(prompt, **kwargs)
95
- except Exception as e:
96
- logging.error(f"[{self.model_name}] Error in ainvoke: {str(e)}")
97
- return Response(f"Error in ainvoke: {str(e)}")
98
-
99
- async def ainvoke_stream(self, prompt: str, **kwargs):
100
- """Async streaming invoke method"""
101
- try:
102
- logging.info(f"[{self.model_name}] ainvoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
103
- result = self.invoke(prompt, **kwargs)
104
- yield result
105
- except Exception as e:
106
- logging.error(f"[{self.model_name}] Error in ainvoke_stream: {str(e)}")
107
- yield Response(f"Error in ainvoke_stream: {str(e)}")
108
-
109
- async def aresponse_stream(self, prompt: str, **kwargs):
110
- """
111
- Método abstrato necessário para implementar a interface Model da biblioteca agno.
112
- Este método deve retornar um gerador assíncrono de objetos Response.
113
- """
114
- try:
115
- logging.info(f"[{self.model_name}] aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
116
- result = self.invoke(prompt, **kwargs)
117
- yield result
118
- except Exception as e:
119
- logging.error(f"[{self.model_name}] Error in aresponse_stream: {str(e)}")
120
- yield Response(f"Error in aresponse_stream: {str(e)}")
121
-
122
- def invoke(self, prompt: str, **kwargs) -> str:
123
- """Synchronous invoke method"""
124
- try:
125
- logging.info(f"[{self.model_name}] Invoking model with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
126
-
127
- # Check if prompt is None or empty
128
- if prompt is None:
129
- logging.warning(f"[{self.model_name}] None prompt provided to invoke method")
130
- return Response("No input provided. Please provide a valid prompt.")
131
-
132
- if not isinstance(prompt, str):
133
- logging.warning(f"[{self.model_name}] Non-string prompt provided: {type(prompt)}")
134
- try:
135
- prompt = str(prompt)
136
- logging.info(f"[{self.model_name}] Converted prompt to string: {prompt[:100]}...")
137
- except:
138
- return Response("Invalid input type. Please provide a string prompt.")
139
-
140
- if not prompt.strip():
141
- logging.warning(f"[{self.model_name}] Empty prompt provided to invoke method")
142
- return Response("No input provided. Please provide a non-empty prompt.")
143
-
144
- inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
145
-
146
- # Configure generation parameters
147
- generation_config = {
148
- "max_length": self.max_length,
149
- "num_return_sequences": 1,
150
- "do_sample": kwargs.get("do_sample", False),
151
- "temperature": kwargs.get("temperature", 1.0),
152
- "top_p": kwargs.get("top_p", 1.0),
153
- }
154
-
155
- # Generate the answer
156
- outputs = self.model.generate(**inputs, **generation_config)
157
- decoded_output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
158
-
159
- # Check if output is empty
160
- if not decoded_output or not decoded_output.strip():
161
- logging.warning(f"[{self.model_name}] Model generated empty output")
162
- return Response("The model did not generate any output. Please try with a different prompt.")
163
-
164
- logging.info(f"[{self.model_name}] Model generated output: {decoded_output[:100]}...")
165
- return Response(decoded_output)
166
- except Exception as e:
167
- logging.error(f"[{self.model_name}] Error in local model generation: {str(e)}")
168
- if hasattr(e, 'args') and len(e.args) > 0:
169
- error_message = e.args[0]
170
- else:
171
- error_message = str(e)
172
- return Response(f"Error during generation: {error_message}")
173
-
174
- def invoke_stream(self, prompt: str, **kwargs):
175
- """Synchronous streaming invoke method"""
176
- try:
177
- logging.info(f"[{self.model_name}] invoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
178
- result = self.invoke(prompt, **kwargs)
179
- yield result
180
- except Exception as e:
181
- logging.error(f"[{self.model_name}] Error in invoke_stream: {str(e)}")
182
- yield Response(f"Error in invoke_stream: {str(e)}")
183
-
184
- def parse_provider_response(self, response: str) -> str:
185
- """Parse the provider response"""
186
- return response
187
-
188
- def parse_provider_response_delta(self, delta: str) -> str:
189
- """Parse the provider response delta for streaming"""
190
- return delta
191
-
192
- async def aresponse(self, prompt=None, **kwargs):
193
- """Async response method - required abstract method"""
194
- try:
195
- # Log detalhado de todos os argumentos
196
- logging.info(f"[{self.model_name}] aresponse args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
197
-
198
- # Extrair o prompt das mensagens se estiverem disponíveis
199
- if prompt is None and 'messages' in kwargs and kwargs['messages']:
200
- messages = kwargs['messages']
201
- # Procurar pela mensagem do usuário
202
- for message in messages:
203
- if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
204
- prompt = message.content
205
- logging.info(f"[{self.model_name}] Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
206
- break
207
-
208
- # Verificar se o prompt está em kwargs['input']
209
- if prompt is None:
210
- if 'input' in kwargs:
211
- prompt = kwargs.get('input')
212
- logging.info(f"[{self.model_name}] Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
213
-
214
- logging.info(f"[{self.model_name}] aresponse called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
215
-
216
- if not prompt or not isinstance(prompt, str) or not prompt.strip():
217
- logging.warning(f"[{self.model_name}] Empty or invalid prompt in aresponse")
218
- return Response("No input provided. Please provide a valid prompt.")
219
-
220
- content = await self.ainvoke(prompt, **kwargs)
221
- return content if isinstance(content, Response) else Response(content)
222
- except Exception as e:
223
- logging.error(f"[{self.model_name}] Error in aresponse: {str(e)}")
224
- return Response(f"Error in aresponse: {str(e)}")
225
-
226
- def response(self, prompt=None, **kwargs):
227
- """Synchronous response method - required abstract method"""
228
- try:
229
- # Log detalhado de todos os argumentos
230
- logging.info(f"[{self.model_name}] response args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
231
-
232
- # Extrair o prompt das mensagens se estiverem disponíveis
233
- if prompt is None and 'messages' in kwargs and kwargs['messages']:
234
- messages = kwargs['messages']
235
- # Procurar pela mensagem do usuário
236
- for message in messages:
237
- if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
238
- prompt = message.content
239
- logging.info(f"[{self.model_name}] Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
240
- break
241
-
242
- # Verificar se o prompt está em kwargs['input']
243
- if prompt is None:
244
- if 'input' in kwargs:
245
- prompt = kwargs.get('input')
246
- logging.info(f"[{self.model_name}] Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
247
-
248
- logging.info(f"[{self.model_name}] response called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
249
-
250
- if not prompt or not isinstance(prompt, str) or not prompt.strip():
251
- logging.warning(f"[{self.model_name}] Empty or invalid prompt in response")
252
- return Response("No input provided. Please provide a valid prompt.")
253
-
254
- content = self.invoke(prompt, **kwargs)
255
- return content if isinstance(content, Response) else Response(content)
256
- except Exception as e:
257
- logging.error(f"[{self.model_name}] Error in response: {str(e)}")
258
- return Response(f"Error in response: {str(e)}")
259
-
260
- def response_stream(self, prompt=None, **kwargs):
261
- """Synchronous streaming response method - required abstract method"""
262
- try:
263
- # Log detalhado de todos os argumentos
264
- logging.info(f"[{self.model_name}] response_stream args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
265
-
266
- # Extrair o prompt das mensagens se estiverem disponíveis
267
- if prompt is None and 'messages' in kwargs and kwargs['messages']:
268
- messages = kwargs['messages']
269
- # Procurar pela mensagem do usuário
270
- for message in messages:
271
- if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
272
- prompt = message.content
273
- logging.info(f"[{self.model_name}] Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
274
- break
275
-
276
- # Verificar se o prompt está em kwargs['input']
277
- if prompt is None:
278
- if 'input' in kwargs:
279
- prompt = kwargs.get('input')
280
- logging.info(f"[{self.model_name}] Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
281
-
282
- logging.info(f"[{self.model_name}] response_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
283
-
284
- if not prompt or not isinstance(prompt, str) or not prompt.strip():
285
- logging.warning(f"[{self.model_name}] Empty or invalid prompt in response_stream")
286
- yield Response("No input provided. Please provide a valid prompt.")
287
- return
288
-
289
- for chunk in self.invoke_stream(prompt, **kwargs):
290
- yield chunk if isinstance(chunk, Response) else Response(chunk)
291
- except Exception as e:
292
- logging.error(f"[{self.model_name}] Error in response_stream: {str(e)}")
293
- yield Response(f"Error in response_stream: {str(e)}")
294
-
295
- def generate(self, prompt: str, **kwargs):
296
- try:
297
- inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
298
-
299
- # Configure generation parameters
300
- generation_config = {
301
- "max_length": self.max_length,
302
- "num_return_sequences": 1,
303
- "do_sample": kwargs.get("do_sample", False),
304
- "temperature": kwargs.get("temperature", 1.0),
305
- "top_p": kwargs.get("top_p", 1.0),
306
- }
307
-
308
- # Generate the answer
309
- outputs = self.model.generate(**inputs, **generation_config)
310
- decoded_output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
311
-
312
- return decoded_output
313
- except Exception as e:
314
- logging.error(f"[{self.model_name}] Error in generate method: {str(e)}")
315
- if hasattr(e, 'args') and len(e.args) > 0:
316
- error_message = e.args[0]
317
- else:
318
- error_message = str(e)
319
- return f"Error during generation: {error_message}"
320
 
321
  class ModelHandler:
322
- """
323
- Classe para gerenciar múltiplos modelos e gerar respostas.
324
- """
325
-
326
  def __init__(self):
327
- """
328
- Inicializa o ModelHandler com múltiplos modelos.
329
- """
330
  self.translator = None
331
  self.researcher = None
 
332
  self.presenter = None
333
- self.force_default_response = False
334
- self.models = {}
335
-
336
- # Os modelos serão carregados posteriormente de forma assíncrona
337
- logging.info("ModelHandler initialized. Models will be loaded asynchronously.")
338
-
339
- def _extract_content(self, result):
340
- """
341
- Extrai o conteúdo de uma resposta do modelo.
342
-
343
- Args:
344
- result: A resposta do modelo, que pode ser um objeto RunResponse ou uma string
345
-
346
- Returns:
347
- O conteúdo da resposta como string
348
- """
349
- try:
350
- if result is None:
351
- return ""
352
-
353
- if hasattr(result, 'content'):
354
- return result.content
355
-
356
- return str(result)
357
- except Exception as e:
358
- logging.error(f"Error extracting content: {str(e)}")
359
- return ""
360
-
361
- async def _load_models_async(self):
362
- """
363
- Carrega os modelos de forma assíncrona.
364
- """
365
- logging.info("Loading models asynchronously...")
366
- self._load_models()
367
- logging.info("Models loaded asynchronously")
368
 
369
- def _format_prompt(self, prompt_type, content):
370
- """
371
- Formata o prompt de acordo com o tipo.
372
-
373
- Args:
374
- prompt_type: O tipo de prompt (translation, research, presentation)
375
- content: O conteúdo a ser incluído no prompt
376
-
377
- Returns:
378
- O prompt formatado
379
- """
380
- if not content or not content.strip():
381
- logging.warning(f"Empty content provided to _format_prompt for {prompt_type}")
382
- return "No input provided."
383
-
384
- if prompt_type == "translation":
385
- return f"""Task: Translate the following text to English
386
-
387
- Instructions:
388
- Provide a direct English translation of the input text.
389
-
390
- Input: {content}
391
-
392
- Output:"""
393
- elif prompt_type == "research":
394
- return f"""Task: Research Assistant
395
-
396
- Instructions:
397
- You are a research assistant tasked with providing comprehensive information.
398
- Please provide a detailed explanation about the topic, including:
399
- - Definition and key characteristics
400
- - Causes or origins if applicable
401
- - Current scientific understanding
402
- - Important facts and statistics
403
- - Recent developments or research
404
- - Real-world implications and applications
405
-
406
- Search for relevant academic papers and medical resources using the provided tools.
407
- Make sure to include findings from recent research in your response.
408
- Use ArxivTools and PubmedTools to find the most relevant and up-to-date information.
409
-
410
- Aim to write at least 4-5 paragraphs with detailed information.
411
- Be thorough and informative, covering all important aspects of the topic.
412
- Use clear and accessible language suitable for a general audience.
413
-
414
- Input: {content}
415
-
416
- Output:"""
417
- elif prompt_type == "presentation":
418
- return f"""Task: Presentation Assistant
419
-
420
- Instructions:
421
- You are presenting research findings to a general audience.
422
- Please format the information in a clear, engaging, and accessible way.
423
- Include:
424
- - A clear introduction to the topic with a compelling title
425
- - Key points organized with headings or bullet points
426
- - Simple explanations of complex concepts
427
- - A brief conclusion or summary
428
- - Translate the entire response to Portuguese
429
- - Add appropriate emojis to make the presentation more engaging
430
- - Format the text using markdown for better readability
431
-
432
- Input: {content}
433
-
434
- Output:"""
435
- else:
436
- logging.error(f"Unknown prompt type: {prompt_type}")
437
- return f"Unknown prompt type: {prompt_type}"
438
-
439
- @staticmethod
440
- def _load_specific_model(model_name: str, purpose: str) -> Tuple[Optional[Any], Optional[Any]]:
441
- """
442
- Load a specific model with retry logic
443
-
444
- Args:
445
- model_name: The name of the model to load
446
- purpose: What the model will be used for (logging purposes)
447
-
448
- Returns:
449
- A tuple of (model, tokenizer) or (None, None) if loading fails
450
- """
451
- try:
452
- logging.info(f"Attempting to load {purpose} model: {model_name}")
453
-
454
- # Criar diretório de cache se não existir
455
- cache_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "model_cache")
456
- os.makedirs(cache_dir, exist_ok=True)
457
-
458
- # Carregar modelo e tokenizer
459
- tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
460
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name, cache_dir=cache_dir)
461
-
462
- logging.info(f"Successfully loaded {purpose} model: {model_name}")
463
- return model, tokenizer
464
- except Exception as e:
465
- logging.error(f"Error loading {purpose} model {model_name}: {str(e)}")
466
- return None, None
467
-
468
- @staticmethod
469
- @st.cache_resource
470
- def _load_fallback_model():
471
- """Load a fallback model"""
472
- # Define retry decorator for model loading
473
- @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
474
- def load_with_retry(model_name):
475
- try:
476
- logging.info(f"Attempting to load fallback model from {model_name}")
477
-
478
- # Criar diretório de cache se não existir
479
- cache_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "model_cache")
480
- os.makedirs(cache_dir, exist_ok=True)
481
-
482
- # Carregar modelo e tokenizer
483
- tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
484
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name, cache_dir=cache_dir)
485
-
486
- logging.info(f"Successfully loaded fallback model from {model_name}")
487
- return model, tokenizer
488
- except Exception as e:
489
- logging.error(f"Error loading fallback model {model_name}: {str(e)}")
490
- raise
491
-
492
- # Lista de modelos para tentar, em ordem de preferência
493
- model_names = ["google/flan-t5-small", "google/flan-t5-base"]
494
-
495
- # Tentar carregar cada modelo na lista
496
- for model_name in model_names:
497
- try:
498
- return load_with_retry(model_name)
499
- except Exception as e:
500
- logging.error(f"Failed to load fallback model {model_name}: {str(e)}")
501
- continue
502
-
503
- # Se todos os modelos falharem, retornar None
504
- logging.error("All fallback models failed to load")
505
- return None, None
506
-
507
- def _get_default_research_content(self, topic):
508
- """
509
- Gera conteúdo de pesquisa padrão quando não for possível gerar com o modelo.
510
-
511
- Args:
512
- topic: O tópico da pesquisa
513
-
514
- Returns:
515
- Conteúdo de pesquisa padrão
516
- """
517
- return f"""
518
- # Research on {topic}
519
-
520
- ## Definition and Key Characteristics
521
-
522
- {topic} is a subject of significant interest in various fields. While detailed information is currently limited in our system, we understand that it encompasses several key characteristics and has important implications.
523
-
524
- ## Current Understanding
525
-
526
- Research on {topic} continues to evolve, with new findings emerging regularly. The current understanding suggests multiple dimensions to consider when approaching this topic.
527
-
528
- ## Applications and Implications
529
-
530
- The study of {topic} has several real-world applications and implications that affect various sectors including healthcare, education, and social services.
531
-
532
- ## Conclusion
533
-
534
- While our current information on {topic} is limited, it represents an important area for continued research and understanding. For more detailed information, consulting specialized literature and experts is recommended.
535
- """
536
-
537
- def _get_default_presentation_content(self):
538
- """
539
- Gera conteúdo de apresentação padrão quando não for possível gerar com o modelo.
540
-
541
- Returns:
542
- Conteúdo de apresentação padrão
543
- """
544
- return """
545
- 🧠 **Entendendo o Tópico** 🧠
546
-
547
- ## O que é?
548
- Este é um tópico complexo com múltiplas dimensões e implicações. Embora as informações detalhadas sejam limitadas no momento, podemos fornecer uma visão geral.
549
-
550
- ## Características Principais:
551
- - 🔍 Possui características distintas que o definem
552
- - 📊 Apresenta variações significativas entre diferentes casos
553
- - 🔬 É objeto de pesquisa contínua em diversos campos
554
-
555
- ## Aplicações e Implicações:
556
- - 🏥 Impacto em áreas como saúde e bem-estar
557
- - 🎓 Relevância para educação e desenvolvimento
558
- - 👪 Influência em dinâmicas sociais e familiares
559
-
560
- ## Conclusão:
561
- Para informações mais detalhadas e específicas, recomendamos consultar literatura especializada e profissionais da área. A compreensão deste tópico continua a evoluir com novas pesquisas.
562
-
563
- *Fonte: Análise de pesquisas científicas atuais*
564
- """
565
-
566
- def _load_models(self):
567
- """Carrega múltiplos modelos para diferentes propósitos"""
568
- # Carregar modelo de tradução
569
- translator_model, translator_tokenizer = self._load_specific_model(
570
- MODEL_CONFIG["translator"]["primary"], "translator"
571
- )
572
-
573
- # Carregar modelo de pesquisa
574
- researcher_model, researcher_tokenizer = self._load_specific_model(
575
- MODEL_CONFIG["researcher"]["primary"], "researcher"
576
- )
577
-
578
- # Carregar modelo de apresentação
579
- presenter_model, presenter_tokenizer = self._load_specific_model(
580
- MODEL_CONFIG["presenter"]["primary"], "presenter"
581
- )
582
-
583
- # Carregar modelo de fallback
584
- fallback_model, fallback_tokenizer = self._load_fallback_model()
585
-
586
- # Criar modelos locais
587
- if translator_model and translator_tokenizer:
588
- self.models["translator"] = LocalHuggingFaceModel(
589
- translator_model,
590
- translator_tokenizer,
591
- model_id=MODEL_CONFIG["translator"]["primary"]
592
- )
593
- else:
594
- # Tentar carregar o modelo fallback para tradutor
595
- fallback_translator, fallback_translator_tokenizer = self._load_specific_model(
596
- MODEL_CONFIG["translator"]["fallback"], "translator fallback"
597
- )
598
-
599
- if fallback_translator and fallback_translator_tokenizer:
600
- self.models["translator"] = LocalHuggingFaceModel(
601
- fallback_translator,
602
- fallback_translator_tokenizer,
603
- model_id=MODEL_CONFIG["translator"]["fallback"]
604
- )
605
- else:
606
- self.models["translator"] = LocalHuggingFaceModel(
607
- fallback_model,
608
- fallback_tokenizer,
609
- model_id="fallback-model"
610
- )
611
-
612
- if researcher_model and researcher_tokenizer:
613
- self.models["researcher"] = LocalHuggingFaceModel(
614
- researcher_model,
615
- researcher_tokenizer,
616
- model_id=MODEL_CONFIG["researcher"]["primary"]
617
- )
618
- else:
619
- # Tentar carregar o modelo fallback para pesquisador
620
- fallback_researcher, fallback_researcher_tokenizer = self._load_specific_model(
621
- MODEL_CONFIG["researcher"]["fallback"], "researcher fallback"
622
- )
623
-
624
- if fallback_researcher and fallback_researcher_tokenizer:
625
- self.models["researcher"] = LocalHuggingFaceModel(
626
- fallback_researcher,
627
- fallback_researcher_tokenizer,
628
- model_id=MODEL_CONFIG["researcher"]["fallback"]
629
- )
630
- else:
631
- self.models["researcher"] = LocalHuggingFaceModel(
632
- fallback_model,
633
- fallback_tokenizer,
634
- model_id="fallback-model"
635
- )
636
-
637
- if presenter_model and presenter_tokenizer:
638
- self.models["presenter"] = LocalHuggingFaceModel(
639
- presenter_model,
640
- presenter_tokenizer,
641
- model_id=MODEL_CONFIG["presenter"]["primary"]
642
- )
643
- else:
644
- # Tentar carregar o modelo fallback para apresentador
645
- fallback_presenter, fallback_presenter_tokenizer = self._load_specific_model(
646
- MODEL_CONFIG["presenter"]["fallback"], "presenter fallback"
647
- )
648
-
649
- if fallback_presenter and fallback_presenter_tokenizer:
650
- self.models["presenter"] = LocalHuggingFaceModel(
651
- fallback_presenter,
652
- fallback_presenter_tokenizer,
653
- model_id=MODEL_CONFIG["presenter"]["fallback"]
654
- )
655
- else:
656
- self.models["presenter"] = LocalHuggingFaceModel(
657
- fallback_model,
658
- fallback_tokenizer,
659
- model_id="fallback-model"
660
- )
661
-
662
- # Configurar agentes com seus respectivos modelos
663
  self.translator = Agent(
664
  name="Translator",
665
  role="You will translate the query to English",
666
- model=self.models["translator"],
667
  goal="Translate to English",
668
  instructions=[
669
- "Translate the query to English",
670
- "Preserve all key information from the original query",
671
- "Return only the translated text without additional comments"
672
  ]
673
  )
674
 
675
- # Configurar o agente de pesquisa com as ferramentas ArxivTools e PubmedTools
676
  self.researcher = Agent(
677
  name="Researcher",
678
  role="You are a research scholar who specializes in autism research.",
679
- model=self.models["researcher"],
 
680
  instructions=[
681
- "You need to understand the context of the question to provide the best answer.",
682
- "Be precise and provide detailed information.",
683
- "You must create an accessible explanation.",
 
684
  "The content must be for people without autism knowledge.",
685
- "Focus on providing comprehensive information about the topic.",
686
- "Include definition, characteristics, causes, and current understanding.",
687
- "ALWAYS use the provided tools (ArxivTools and PubmedTools) to search for relevant information.",
688
- "Cite specific papers and studies in your response when appropriate.",
689
- "When using tools, specify the search query clearly in your thoughts before making the call."
690
  ],
691
- tools=[
692
- ArxivTools(), # Usar ferramentas ArxivTools
693
- PubmedTools() # Usar ferramentas PubmedTools
694
  ],
695
  )
696
 
697
  self.presenter = Agent(
698
  name="Presenter",
699
  role="You are a professional researcher who presents the results of the research.",
700
- model=self.models["presenter"],
701
  instructions=[
702
  "You are multilingual",
703
- "You must present the results in a clear and engaging manner.",
704
- "Format the information with headings and bullet points.",
705
- "Provide simple explanations of complex concepts.",
706
- "Include a brief conclusion or summary.",
707
- "Add emojis to make the presentation more interactive.",
708
- "Translate the answer to Portuguese.",
709
- "Maintain any citations or references from the research in your presentation.",
710
- "Do not add fictional information not present in the research."
711
- ]
 
712
  )
713
 
714
- logging.info("Models and agents loaded successfully.")
715
 
716
- async def _run_with_tools(self, agent, prompt, max_steps=5):
717
- """
718
- Executa um agente com suporte a ferramentas e gerencia a execução.
719
-
720
- Args:
721
- agent: O agente a ser executado
722
- prompt: O prompt a ser enviado para o agente
723
- max_steps: Número máximo de passos para execução
724
-
725
- Returns:
726
- O resultado da execução do agente
727
- """
728
  try:
729
- logging.info(f"Running agent {agent.name} with tools")
730
- # O método arun retorna um coroutine que precisa ser awaited
731
- result = await agent.arun(prompt, max_steps=max_steps)
732
- logging.info(f"Agent {agent.name} execution complete")
733
- return result
734
  except Exception as e:
735
- logging.error(f"Error during agent {agent.name} execution: {str(e)}")
736
- return f"Error during {agent.name} execution: {str(e)}"
737
 
738
- async def generate_answer_async(self, query: str) -> str:
739
- """
740
- Gera uma resposta baseada na consulta do usuário usando execução assíncrona.
741
-
742
- Args:
743
- query: A consulta do usuário
744
-
745
- Returns:
746
- Uma resposta formatada
747
- """
748
  try:
749
- if not query or not query.strip():
750
- logging.error("Empty query provided")
751
- return "Erro: Por favor, forneça uma consulta não vazia."
752
-
753
- logging.info(f"Generating answer for query: {query}")
754
-
755
- # Verificar se os modelos estão disponíveis
756
- if not self.translator or not self.researcher or not self.presenter:
757
- logging.error("Models not available")
758
- return "Desculpe, o serviço está temporariamente indisponível. Por favor, tente novamente mais tarde."
759
-
760
- # Traduzir a consulta para inglês
761
- translation_prompt = self._format_prompt("translation", query)
762
- logging.info(f"Translation prompt: {translation_prompt}")
763
-
764
- try:
765
- # O método arun retorna um coroutine que precisa ser awaited
766
- result = await self.translator.arun(translation_prompt)
767
- logging.info(f"Translation result type: {type(result)}")
768
-
769
- # Extrair o conteúdo da resposta
770
- translation_content = self._extract_content(result)
771
- logging.info(f"Translation content: {translation_content}")
772
-
773
- if not translation_content or not translation_content.strip():
774
- logging.error("Empty translation result")
775
- return "Desculpe, não foi possível processar sua consulta. Por favor, tente novamente com uma pergunta diferente."
776
-
777
- # Realizar a pesquisa com ferramentas
778
- research_prompt = self._format_prompt("research", translation_content)
779
- logging.info(f"Research prompt: {research_prompt}")
780
-
781
- research_result = await self._run_with_tools(self.researcher, research_prompt)
782
- logging.info(f"Research result type: {type(research_result)}")
783
-
784
- # Extrair o conteúdo da pesquisa
785
- research_content = self._extract_content(research_result)
786
- logging.info(f"Research content: {research_content}")
787
-
788
- # Verificar se a resposta da pesquisa é muito curta
789
- research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
790
- logging.info(f"Research content length: {research_length} characters")
791
-
792
- if not research_content or not research_content.strip() or research_length < 150:
793
- logging.warning(f"Research result too short ({research_length} chars), trying with a more specific prompt")
794
- # Tentar novamente com um prompt mais específico
795
- enhanced_prompt = f"""Task: Detailed Research
796
-
797
- Instructions:
798
- Provide a comprehensive explanation about '{translation_content}'.
799
- Include definition, characteristics, causes, and current understanding.
800
- Write at least 4-5 paragraphs with detailed information.
801
- Be thorough and informative, covering all important aspects of the topic.
802
- Use clear and accessible language suitable for a general audience.
803
-
804
- Output:"""
805
- logging.info(f"Enhanced research prompt: {enhanced_prompt}")
806
- research_result = await self._run_with_tools(self.researcher, enhanced_prompt)
807
- research_content = self._extract_content(research_result)
808
- research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
809
- logging.info(f"Enhanced research content: {research_content}")
810
- logging.info(f"Enhanced research content length: {research_length} characters")
811
-
812
- if not research_content or not research_content.strip() or research_length < 150:
813
- logging.warning(f"Research result still too short ({research_length} chars), using default response")
814
- # Usar resposta padrão
815
- logging.info("Using default research content")
816
- research_content = self._get_default_research_content(translation_content)
817
-
818
- # Gerar a apresentação
819
- presentation_prompt = self._format_prompt("presentation", research_content)
820
- logging.info(f"Presentation prompt: {presentation_prompt}")
821
-
822
- # O método arun retorna um coroutine que precisa ser awaited
823
- result = await self.presenter.arun(presentation_prompt)
824
- logging.info(f"Presentation type: {type(result)}")
825
-
826
- presentation_content = self._extract_content(result)
827
- logging.info(f"Presentation content: {presentation_content}")
828
-
829
- presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0
830
- logging.info(f"Presentation content length: {presentation_length} characters")
831
-
832
- if not presentation_content or not presentation_content.strip() or presentation_length < 150:
833
- logging.warning(f"Presentation result too short ({presentation_length} chars), using default presentation")
834
-
835
- logging.info("Answer generated successfully")
836
- return presentation_content
837
-
838
- except Exception as e:
839
- logging.error(f"Error during answer generation: {str(e)}")
840
- return f"Desculpe, ocorreu um erro ao processar sua consulta: {str(e)}. Por favor, tente novamente mais tarde."
841
-
842
  except Exception as e:
843
- logging.error(f"Unexpected error in generate_answer_async: {str(e)}")
844
- return "Desculpe, ocorreu um erro inesperado. Por favor, tente novamente mais tarde."
  import logging
+ from transformers import AutoTokenizer, AutoModelForCausalLM
  import streamlit as st
  from agno.agent import Agent
+ from agno.models.ollama import Ollama
  from agno.tools.arxiv import ArxivTools
  from agno.tools.pubmed import PubmedTools

+ MODEL_PATH = "meta-llama/Llama-3.2-1B"

  class ModelHandler:
      def __init__(self):
+         """Initialize the model handler"""
+         self.model = None
+         self.tokenizer = None
          self.translator = None
          self.researcher = None
+         self.summarizer = None
          self.presenter = None
+         self._initialize_model()

+     def _initialize_model(self):
+         """Initialize model and tokenizer"""
+         self.model, self.tokenizer = self._load_model()
          self.translator = Agent(
              name="Translator",
              role="You will translate the query to English",
+             model=Ollama(id="llama3.2:1b"),
              goal="Translate to English",
              instructions=[
+                 "Translate the query to English"
              ]
          )

          self.researcher = Agent(
              name="Researcher",
              role="You are a research scholar who specializes in autism research.",
+             model=Ollama(id="llama3.2:1b"),
+             tools=[ArxivTools(), PubmedTools()],
              instructions=[
+                 "You need to understand the context of the question to provide the best answer based on your tools.",
+                 "Be precise and provide just enough information to be useful.",
+                 "You must cite the sources used in your answer.",
+                 "You must create an accessible summary.",
                  "The content must be for people without autism knowledge.",
+                 "Focus on the main findings of the paper, taking the question into consideration.",
+                 "The answer must be brief."
              ],
+             show_tool_calls=True,
+         )
+         self.summarizer = Agent(
+             name="Summarizer",
+             role="You are a specialist in summarizing research papers for people without autism knowledge.",
+             model=Ollama(id="llama3.2:1b"),
+             instructions=[
+                 "You must provide just enough information to be useful.",
+                 "You must cite the sources used in your answer.",
+                 "You must be clear and concise.",
+                 "You must create an accessible summary.",
+                 "The content must be for people without autism knowledge.",
+                 "Focus on the main findings of the paper, taking the question into consideration.",
+                 "The answer must be brief.",
+                 "Remove everything related to the run itself like: 'Running: transfer_', just use plain text.",
+                 "You must use the language provided by the user to present the results.",
+                 "Add references to the sources used in the answer.",
+                 "Add emojis to make the presentation more interactive.",
+                 "Translate the answer to Portuguese."
+             ],
+             show_tool_calls=True,
+             markdown=True,
+             add_references=True,
+         )

          self.presenter = Agent(
              name="Presenter",
              role="You are a professional researcher who presents the results of the research.",
+             model=Ollama(id="llama3.2:1b"),
              instructions=[
                  "You are multilingual",
+                 "You must present the results in a clear and concise manner.",
+                 "Clean up the presentation to make it more readable.",
+                 "Remove unnecessary information.",
+                 "Remove everything related to the run itself like: 'Running: transfer_', just use plain text.",
+                 "You must use the language provided by the user to present the results.",
+                 "Add references to the sources used in the answer.",
+                 "Add emojis to make the presentation more interactive.",
+                 "Translate the answer to Portuguese."
+             ],
+             add_references=True,
          )

+     @staticmethod
+     @st.cache_resource
+     def _load_model():
          try:
+             tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+             model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)
+             return model, tokenizer
          except Exception as e:
+             logging.error(f"Error loading model: {str(e)}")
+             return None, None

+     def generate_answer(self, query: str) -> str:
          try:
+             translator = self.translator.run(query, stream=False)
+             logging.info("Translated query")
+             research = self.researcher.run(translator.content, stream=False)
+             logging.info("Generated research")
+             summary = self.summarizer.run(research.content, stream=False)
+             logging.info("Generated summary")
+             presentation = self.presenter.run(summary.content, stream=False)
+             logging.info("Generated presentation")
+
+             if not presentation.content:
+                 return self._get_fallback_response()
+             return presentation.content
          except Exception as e:
+             logging.error(f"Error generating answer: {str(e)}")
+             return self._get_fallback_response()
+
+     @staticmethod
+     def _get_fallback_response() -> str:
+         """Provide a friendly, helpful fallback response"""
+         return """
+         Peço desculpas, mas encontrei um erro ao gerar a resposta. Tente novamente ou refaça a sua pergunta.
+         """
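For reference, a minimal sketch of driving the new synchronous pipeline outside Streamlit (translator -> researcher -> summarizer -> presenter). It assumes the services package is importable and that a local Ollama server already serves llama3.2:1b; the query string is only an example:

    # Hypothetical usage sketch; not part of this commit.
    from services.model_handler import ModelHandler

    handler = ModelHandler()  # builds the four agno Agents and loads the HF model
    answer = handler.generate_answer("O que é hiperfoco no autismo?")
    print(answer)  # falls back to the apology message if any agent call fails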