# alem-do-espectro/services/model_handler.py
import logging
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import streamlit as st
from agno.agent import Agent
from agno.models.base import Model
from tenacity import retry, stop_after_attempt, wait_exponential
import time
import datetime
import os
MODEL_PATH = "google/flan-t5-small"
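# Preferred checkpoint; _load_model() falls back to google/flan-t5-base if this one fails to load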
# Simple Response class to wrap the model output
class Response:
def __init__(self, content):
# Ensure content is a string and not empty
if content is None:
content = ""
if not isinstance(content, str):
content = str(content)
# Store the content
self.content = content
# Add tool_calls attribute with default empty list
self.tool_calls = []
# Add other attributes that might be needed
self.audio = None
self.images = []
self.citations = []
self.metadata = {}
self.finish_reason = "stop"
self.usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
# Add timestamp attributes
current_time = time.time()
self.created_at = int(current_time) # Convert to integer
self.created = int(current_time)
self.timestamp = datetime.datetime.now().isoformat()
# Add model info attributes
self.id = "local-model-response"
self.model = "local-huggingface"
self.object = "chat.completion"
self.choices = [{"index": 0, "message": {"role": "assistant", "content": content}, "finish_reason": "stop"}]
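        # These fields mirror the OpenAI chat-completion response shape for callers that expect that schema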
# Add additional attributes that might be needed
self.system_fingerprint = ""
self.is_truncated = False
self.role = "assistant"
def __str__(self):
return self.content if self.content else ""
def __repr__(self):
return f"Response(content='{self.content[:50]}{'...' if len(self.content) > 50 else ''}')"
# Custom Model wrapper for local Hugging Face seq2seq models
class LocalHuggingFaceModel(Model):
def __init__(self, model, tokenizer, max_length=512):
super().__init__(id="local-huggingface")
self.model = model
self.tokenizer = tokenizer
self.max_length = max_length
    async def ainvoke(self, prompt: str, **kwargs) -> Response:
        """Async invoke method"""
        try:
            logging.info(f"ainvoke called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
            # invoke() is synchronous; its Response result must not be awaited
            return self.invoke(prompt, **kwargs)
except Exception as e:
logging.error(f"Error in ainvoke: {str(e)}")
return Response(f"Error in ainvoke: {str(e)}")
async def ainvoke_stream(self, prompt: str, **kwargs):
"""Async streaming invoke method"""
try:
logging.info(f"ainvoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
            result = self.invoke(prompt, **kwargs)
yield result
except Exception as e:
logging.error(f"Error in ainvoke_stream: {str(e)}")
yield Response(f"Error in ainvoke_stream: {str(e)}")
    def invoke(self, prompt: str, **kwargs) -> Response:
"""Synchronous invoke method"""
try:
logging.info(f"Invoking model with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
# Check if prompt is None or empty
if prompt is None:
logging.warning("None prompt provided to invoke method")
return Response("No input provided. Please provide a valid prompt.")
if not isinstance(prompt, str):
logging.warning(f"Non-string prompt provided: {type(prompt)}")
try:
prompt = str(prompt)
logging.info(f"Converted prompt to string: {prompt[:100]}...")
                except Exception:
return Response("Invalid input type. Please provide a string prompt.")
if not prompt.strip():
logging.warning("Empty prompt provided to invoke method")
return Response("No input provided. Please provide a non-empty prompt.")
inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
# Configure generation parameters
generation_config = {
"max_length": self.max_length,
"num_return_sequences": 1,
"do_sample": kwargs.get("do_sample", False),
"temperature": kwargs.get("temperature", 1.0),
"top_p": kwargs.get("top_p", 1.0),
}
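            # Note: temperature and top_p only take effect when do_sample=True; the default here is greedy decoding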
# Generate the answer
outputs = self.model.generate(**inputs, **generation_config)
decoded_output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
# Check if output is empty
if not decoded_output or not decoded_output.strip():
logging.warning("Model generated empty output")
return Response("The model did not generate any output. Please try with a different prompt.")
logging.info(f"Model generated output: {decoded_output[:100]}...")
return Response(decoded_output)
except Exception as e:
logging.error(f"Error in local model generation: {str(e)}")
if hasattr(e, 'args') and len(e.args) > 0:
error_message = e.args[0]
else:
error_message = str(e)
return Response(f"Error during generation: {error_message}")
def invoke_stream(self, prompt: str, **kwargs):
"""Synchronous streaming invoke method"""
try:
logging.info(f"invoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
result = self.invoke(prompt, **kwargs)
yield result
except Exception as e:
logging.error(f"Error in invoke_stream: {str(e)}")
yield Response(f"Error in invoke_stream: {str(e)}")
def parse_provider_response(self, response: str) -> str:
"""Parse the provider response"""
return response
def parse_provider_response_delta(self, delta: str) -> str:
"""Parse the provider response delta for streaming"""
return delta
async def aresponse(self, prompt=None, **kwargs):
"""Async response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"aresponse args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"aresponse called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in aresponse")
return Response("No input provided. Please provide a valid prompt.")
content = await self.ainvoke(prompt, **kwargs)
return content if isinstance(content, Response) else Response(content)
except Exception as e:
logging.error(f"Error in aresponse: {str(e)}")
return Response(f"Error in aresponse: {str(e)}")
def response(self, prompt=None, **kwargs):
"""Synchronous response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"response args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"response called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in response")
return Response("No input provided. Please provide a valid prompt.")
content = self.invoke(prompt, **kwargs)
return content if isinstance(content, Response) else Response(content)
except Exception as e:
logging.error(f"Error in response: {str(e)}")
return Response(f"Error in response: {str(e)}")
def response_stream(self, prompt=None, **kwargs):
"""Synchronous streaming response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"response_stream args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"response_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in response_stream")
yield Response("No input provided. Please provide a valid prompt.")
return
for chunk in self.invoke_stream(prompt, **kwargs):
yield chunk if isinstance(chunk, Response) else Response(chunk)
except Exception as e:
logging.error(f"Error in response_stream: {str(e)}")
yield Response(f"Error in response_stream: {str(e)}")
def generate(self, prompt: str, **kwargs):
try:
inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
# Configure generation parameters
generation_config = {
"max_length": self.max_length,
"num_return_sequences": 1,
"do_sample": kwargs.get("do_sample", False),
"temperature": kwargs.get("temperature", 1.0),
"top_p": kwargs.get("top_p", 1.0),
}
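            # Note: temperature and top_p only take effect when do_sample=True; the default here is greedy decoding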
# Generate the answer
outputs = self.model.generate(**inputs, **generation_config)
decoded_output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
return decoded_output
except Exception as e:
logging.error(f"Error in generate method: {str(e)}")
if hasattr(e, 'args') and len(e.args) > 0:
error_message = e.args[0]
else:
error_message = str(e)
return f"Error during generation: {error_message}"
class DummyModel(Model):
def __init__(self):
super().__init__(id="dummy-model")
    async def ainvoke(self, prompt: str, **kwargs) -> Response:
        """Async invoke method"""
        # invoke() is synchronous; its Response result must not be awaited
        return self.invoke(prompt=prompt, **kwargs)
async def ainvoke_stream(self, prompt: str, **kwargs):
"""Async streaming invoke method"""
        result = self.invoke(prompt=prompt, **kwargs)
yield result
    def invoke(self, prompt: str, **kwargs) -> Response:
"""Synchronous invoke method"""
return Response("Sorry, the model is not available. Please try again later.")
def invoke_stream(self, prompt: str, **kwargs):
"""Synchronous streaming invoke method"""
result = self.invoke(prompt=prompt, **kwargs)
yield result
def parse_provider_response(self, response: str) -> str:
"""Parse the provider response"""
return response
def parse_provider_response_delta(self, delta: str) -> str:
"""Parse the provider response delta for streaming"""
return delta
async def aresponse(self, prompt=None, **kwargs):
"""Async response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"aresponse args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"aresponse called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in aresponse")
return Response("No input provided. Please provide a valid prompt.")
content = await self.ainvoke(prompt, **kwargs)
return content if isinstance(content, Response) else Response(content)
except Exception as e:
logging.error(f"Error in aresponse: {str(e)}")
return Response(f"Error in aresponse: {str(e)}")
def response(self, prompt=None, **kwargs):
"""Synchronous response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"response args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"response called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in response")
return Response("No input provided. Please provide a valid prompt.")
content = self.invoke(prompt, **kwargs)
return content if isinstance(content, Response) else Response(content)
except Exception as e:
logging.error(f"Error in response: {str(e)}")
return Response(f"Error in response: {str(e)}")
def response_stream(self, prompt=None, **kwargs):
"""Synchronous streaming response method - required abstract method"""
try:
            # Log all arguments in detail
            logging.info(f"response_stream args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
            # Extract the prompt from the messages if they are available
            if prompt is None and 'messages' in kwargs and kwargs['messages']:
                messages = kwargs['messages']
                # Look for the user message
                for message in messages:
                    if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
                        prompt = message.content
                        logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
                        break
            # Check whether the prompt was passed via kwargs['input']
            if prompt is None and 'input' in kwargs:
                prompt = kwargs['input']
                logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
logging.info(f"response_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
if not prompt or not isinstance(prompt, str) or not prompt.strip():
logging.warning("Empty or invalid prompt in response_stream")
yield Response("No input provided. Please provide a valid prompt.")
return
for chunk in self.invoke_stream(prompt, **kwargs):
yield chunk if isinstance(chunk, Response) else Response(chunk)
except Exception as e:
logging.error(f"Error in response_stream: {str(e)}")
yield Response(f"Error in response_stream: {str(e)}")
class ModelHandler:
"""
Classe para gerenciar modelos e gerar respostas.
"""
def __init__(self):
"""
Inicializa o ModelHandler.
"""
self.translator = None
self.researcher = None
self.presenter = None
self.force_default_response = False
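        # When True, generate_answer() skips the agents and returns the canned fallback content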
        # Initialize the models
self._load_models()
def _extract_content(self, result):
"""
        Extract the content from a model response.
        Args:
            result: The model response, either a RunResponse-like object or a string
        Returns:
            The response content as a string
"""
try:
if result is None:
return ""
if hasattr(result, 'content'):
return result.content
return str(result)
except Exception as e:
logging.error(f"Error extracting content: {str(e)}")
return ""
def _format_prompt(self, prompt_type, content):
"""
        Format the prompt according to its type.
        Args:
            prompt_type: The prompt type (translation, research, presentation)
            content: The content to include in the prompt
        Returns:
            The formatted prompt
"""
if not content or not content.strip():
logging.warning(f"Empty content provided to _format_prompt for {prompt_type}")
return "No input provided."
if prompt_type == "translation":
return f"""Task: Translate the following text to English
Instructions:
Provide a direct English translation of the input text.
Input: {content}
Output:"""
elif prompt_type == "research":
return f"""Task: Research Assistant
Instructions:
You are a research assistant tasked with providing comprehensive information.
Please provide a detailed explanation about the topic, including:
- Definition and key characteristics
- Causes or origins if applicable
- Current scientific understanding
- Important facts and statistics
- Recent developments or research
- Real-world implications and applications
Aim to write at least 4-5 paragraphs with detailed information.
Be thorough and informative, covering all important aspects of the topic.
Use clear and accessible language suitable for a general audience.
Input: {content}
Output:"""
elif prompt_type == "presentation":
return f"""Task: Presentation Assistant
Instructions:
You are presenting research findings to a general audience.
Please format the information in a clear, engaging, and accessible way.
Include:
- A clear introduction to the topic with a compelling title
- Key points organized with headings or bullet points
- Simple explanations of complex concepts
- A brief conclusion or summary
- Translate the entire response to Portuguese
- Add appropriate emojis to make the presentation more engaging
- Format the text using markdown for better readability
Input: {content}
Output:"""
else:
logging.error(f"Unknown prompt type: {prompt_type}")
return f"Unknown prompt type: {prompt_type}"
@staticmethod
@st.cache_resource
def _load_model():
"""Load the model and tokenizer with retry logic"""
        # Retry model loading up to 3 times with exponential backoff between attempts
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def load_with_retry(model_name):
try:
logging.info(f"Attempting to load model from {model_name}")
                # Create the cache directory if it does not exist
cache_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "model_cache")
os.makedirs(cache_dir, exist_ok=True)
                # Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, cache_dir=cache_dir)
logging.info(f"Successfully loaded model from {model_name}")
return model, tokenizer
except Exception as e:
logging.error(f"Error loading model {model_name}: {str(e)}")
raise
        # Models to try, in order of preference (MODEL_PATH first)
        model_names = [MODEL_PATH, "google/flan-t5-base"]
        # Try each model in the list
for model_name in model_names:
try:
return load_with_retry(model_name)
except Exception as e:
logging.error(f"Failed to load {model_name}: {str(e)}")
continue
        # If every model fails, return None
logging.error("All models failed to load")
return None, None
def _load_models(self):
"""Carrega os modelos necessários"""
# Inicializar modelo local
base_model = self._initialize_local_model()
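        # Three-agent pipeline used by generate_answer(): Translator -> Researcher -> Presenter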
self.translator = Agent(
name="Translator",
role="You will translate the query to English",
model=base_model,
goal="Translate to English",
instructions=[
"Translate the query to English"
]
)
self.researcher = Agent(
name="Researcher",
role="You are a research scholar who specializes in autism research.",
model=base_model,
instructions=[
"You need to understand the context of the question to provide the best answer.",
"Be precise and provide detailed information.",
"You must create an accessible explanation.",
"The content must be for people without autism knowledge.",
"Focus on providing comprehensive information about the topic.",
"Include definition, characteristics, causes, and current understanding."
]
)
self.presenter = Agent(
name="Presenter",
role="You are a professional researcher who presents the results of the research.",
model=base_model,
instructions=[
"You are multilingual",
"You must present the results in a clear and engaging manner.",
"Format the information with headings and bullet points.",
"Provide simple explanations of complex concepts.",
"Include a brief conclusion or summary.",
"Add emojis to make the presentation more interactive.",
"Translate the answer to Portuguese."
]
)
def _initialize_local_model(self):
"""Initialize local model as fallback"""
model, tokenizer = self._load_model()
if model is None or tokenizer is None:
# Create a dummy model that returns a helpful message
return DummyModel()
return LocalHuggingFaceModel(model, tokenizer)
def generate_answer(self, query: str) -> str:
"""
        Generate an answer based on the user's query.
        Args:
            query: The user's query
        Returns:
            A formatted answer
"""
try:
if not query or not query.strip():
logging.error("Empty query provided")
return "Erro: Por favor, forneça uma consulta não vazia."
logging.info(f"Generating answer for query: {query}")
            # Check that the models are available
if not self.translator or not self.researcher or not self.presenter:
logging.error("Models not available")
return "Desculpe, o serviço está temporariamente indisponível. Por favor, tente novamente mais tarde."
            # Translate the query to English
translation_prompt = self._format_prompt("translation", query)
logging.info(f"Translation prompt: {translation_prompt}")
try:
translation_result = self.translator.run(translation_prompt)
logging.info(f"Translation result type: {type(translation_result)}")
                # Extract the content from the translation response
translation_content = self._extract_content(translation_result)
logging.info(f"Translation content: {translation_content}")
if not translation_content or not translation_content.strip():
logging.error("Empty translation result")
return "Desculpe, não foi possível processar sua consulta. Por favor, tente novamente com uma pergunta diferente."
                # If forcing the default response, skip the research step and use the canned content directly
                if self.force_default_response:
                    logging.info("Forcing default response")
                    research_content = self._get_default_research_content(translation_content)
                else:
                    # Run the research step
research_prompt = self._format_prompt("research", translation_content)
logging.info(f"Research prompt: {research_prompt}")
research_result = self.researcher.run(research_prompt)
logging.info(f"Research result type: {type(research_result)}")
                    # Extract the research content
research_content = self._extract_content(research_result)
logging.info(f"Research content: {research_content}")
                    # Check whether the research answer is too short
research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
logging.info(f"Research content length: {research_length} characters")
if not research_content or not research_content.strip() or research_length < 150:
logging.warning(f"Research result too short ({research_length} chars), trying with a more specific prompt")
                        # Retry with a more specific prompt
enhanced_prompt = f"""Task: Detailed Research
Instructions:
Provide a comprehensive explanation about '{translation_content}'.
Include definition, characteristics, causes, and current understanding.
Write at least 4-5 paragraphs with detailed information.
Be thorough and informative, covering all important aspects of the topic.
Use clear and accessible language suitable for a general audience.
Output:"""
logging.info(f"Enhanced research prompt: {enhanced_prompt}")
research_result = self.researcher.run(enhanced_prompt)
research_content = self._extract_content(research_result)
research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
logging.info(f"Enhanced research content: {research_content}")
logging.info(f"Enhanced research content length: {research_length} characters")
                        # If the result is still empty or too short, fall back to the default response
                        if not research_content or not research_content.strip() or research_length < 150:
                            logging.warning(f"Research result still too short ({research_length} chars), using default response")
                            # Use the default response
                            logging.info("Using default research content")
                            research_content = self._get_default_research_content(translation_content)
                # If forcing the default response, skip the presentation step and use the canned content directly
                if self.force_default_response:
                    logging.info("Forcing default presentation")
                    presentation_content = self._get_default_presentation_content()
                else:
                    # Present the results
presentation_prompt = self._format_prompt("presentation", research_content)
logging.info(f"Presentation prompt: {presentation_prompt}")
presentation_result = self.presenter.run(presentation_prompt)
logging.info(f"Presentation type: {type(presentation_result)}")
                    # Extract the presentation content
presentation_content = self._extract_content(presentation_result)
logging.info(f"Presentation content: {presentation_content}")
                    # Check whether the presentation is too short
presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0
logging.info(f"Presentation content length: {presentation_length} characters")
if not presentation_content or not presentation_content.strip() or presentation_length < 150:
logging.warning(f"Presentation result too short ({presentation_length} chars), using default presentation")
                        # Use the default presentation
logging.info("Using default presentation content")
presentation_content = self._get_default_presentation_content()
logging.info("Answer generated successfully")
return presentation_content
except Exception as e:
logging.error(f"Error during answer generation: {str(e)}")
return f"Desculpe, ocorreu um erro ao processar sua consulta: {str(e)}. Por favor, tente novamente mais tarde."
except Exception as e:
logging.error(f"Unexpected error in generate_answer: {str(e)}")
return "Desculpe, ocorreu um erro inesperado. Por favor, tente novamente mais tarde."
def _get_default_research_content(self, topic: str) -> str:
"""
        Return default research content for the given topic.
        Args:
            topic: The research topic
        Returns:
            Default research content
"""
return f"""Information about {topic}:
Autism is a complex neurodevelopmental disorder that affects communication, social interaction, and behavior. It is characterized by challenges with social skills, repetitive behaviors, speech, and nonverbal communication.
The condition is part of a broader category called autism spectrum disorder (ASD), which reflects the wide variation in challenges and strengths possessed by each person with autism. Some individuals with autism may require significant support in their daily lives, while others may need less support and, in some cases, live entirely independently.
Autism is believed to be caused by a combination of genetic and environmental factors. Research suggests that certain genetic mutations may increase the risk of autism, as well as various environmental factors that influence early brain development. There is no single cause for autism, making it a complex condition to understand and treat.
Early diagnosis and intervention are important for improving outcomes for individuals with autism. Various therapies and support strategies can help people with autism develop skills and cope with challenges. These may include behavioral therapy, speech therapy, occupational therapy, and educational support.
It's important to note that autism is not a disease to be cured but a different way of experiencing and interacting with the world. Many people with autism have exceptional abilities in visual skills, music, math, and art, among other areas."""
def _get_default_presentation_content(self) -> str:
"""
        Return default presentation content (in Portuguese, matching the app's output language).
        Returns:
            Default presentation content
"""
return """🧠 **Autismo: Entendendo o Espectro** 🧠
## O que é o Autismo?
O autismo é uma condição neurológica complexa que afeta a comunicação, interação social e comportamento. É caracterizado por desafios com habilidades sociais, comportamentos repetitivos, fala e comunicação não verbal.
## Características Principais:
- 🔄 Comportamentos repetitivos e interesses restritos
- 🗣️ Dificuldades na comunicação verbal e não verbal
- 👥 Desafios nas interações sociais
- 🎭 Dificuldade em entender expressões faciais e emoções
- 🔊 Sensibilidade sensorial (sons, luzes, texturas)
## Causas e Origens:
O autismo é causado por uma combinação de fatores genéticos e ambientais. Pesquisas sugerem que certas mutações genéticas podem aumentar o risco, assim como vários fatores ambientais que influenciam o desenvolvimento inicial do cérebro.
## Pontos Importantes:
- 📊 O autismo afeta cada pessoa de maneira diferente (por isso é chamado de "espectro")
- 🧩 Diagnóstico precoce e intervenção melhoram os resultados
- 💪 Muitas pessoas com autismo têm habilidades excepcionais em áreas específicas
- 🌈 O autismo não é uma doença a ser curada, mas uma forma diferente de experimentar o mundo
## Conclusão:
Compreender o autismo é essencial para criar uma sociedade mais inclusiva. Cada pessoa com autismo tem suas próprias forças e desafios únicos, e merece apoio e aceitação.
*Fonte: Pesquisas científicas atuais sobre transtornos do espectro autista*
"""