wakeupmh commited on
Commit
2696e88
·
1 Parent(s): 1c95026

pass prompt

Browse files
Files changed (3) hide show
  1. app.py +15 -3
  2. services/model_handler.py +253 -204
  3. test_model.py +0 -40
app.py CHANGED
@@ -34,17 +34,29 @@ class AutismResearchApp:
34
  col1, col2 = st.columns(2, vertical_alignment="bottom", gap="small")
35
 
36
  query = col1.text_input("O que você precisa saber?")
 
 
 
 
37
  if col2.button("Enviar"):
 
 
 
 
38
  # Show status while processing
39
  with st.status("Processando sua Pergunta...") as status:
40
- status.write("🔍 Buscando papers de pesquisa relevantes...")
41
- status.write("📚 Analisando papers de pesquisa...")
42
  status.write("✍️ Gerando resposta...")
 
 
 
 
43
  answer = self.model_handler.generate_answer(query)
44
 
45
  status.write("✨ Resposta gerada! Exibindo resultados...")
46
 
47
- st.success("✅ Resposta gerada com base nos artigos de pesquisa encontrados.")
48
 
49
 
50
  st.markdown("### Resposta")
 
34
  col1, col2 = st.columns(2, vertical_alignment="bottom", gap="small")
35
 
36
  query = col1.text_input("O que você precisa saber?")
37
+
38
+ # Adicionar opção para usar resposta padrão
39
+ use_default = st.checkbox("Usar resposta padrão (recomendado para melhor qualidade)", value=True)
40
+
41
  if col2.button("Enviar"):
42
+ if not query:
43
+ st.error("Por favor, digite uma pergunta.")
44
+ return
45
+
46
  # Show status while processing
47
  with st.status("Processando sua Pergunta...") as status:
48
+ status.write("🔍 Buscando informações relevantes...")
49
+ status.write("📚 Analisando dados...")
50
  status.write("✍️ Gerando resposta...")
51
+
52
+ # Definir se deve usar resposta padrão
53
+ self.model_handler.force_default_response = use_default
54
+
55
  answer = self.model_handler.generate_answer(query)
56
 
57
  status.write("✨ Resposta gerada! Exibindo resultados...")
58
 
59
+ st.success("✅ Resposta gerada com sucesso!")
60
 
61
 
62
  st.markdown("### Resposta")
services/model_handler.py CHANGED
@@ -8,6 +8,7 @@ from agno.models.base import Model
8
  from tenacity import retry, stop_after_attempt, wait_exponential
9
  import time
10
  import datetime
 
11
 
12
  MODEL_PATH = "google/flan-t5-small"
13
 
@@ -188,28 +189,6 @@ class LocalHuggingFaceModel(Model):
188
  logging.error(f"Error in aresponse: {str(e)}")
189
  return Response(f"Error in aresponse: {str(e)}")
190
 
191
- async def aresponse_stream(self, prompt=None, **kwargs):
192
- """Async streaming response method - required abstract method"""
193
- try:
194
- # Verificar se o prompt está em kwargs['input']
195
- if prompt is None:
196
- if 'input' in kwargs:
197
- prompt = kwargs.get('input')
198
- logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
199
-
200
- logging.info(f"aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
201
-
202
- if not prompt or not isinstance(prompt, str) or not prompt.strip():
203
- logging.warning("Empty or invalid prompt in aresponse_stream")
204
- yield Response("No input provided. Please provide a valid prompt.")
205
- return
206
-
207
- async for chunk in self.ainvoke_stream(prompt, **kwargs):
208
- yield chunk if isinstance(chunk, Response) else Response(chunk)
209
- except Exception as e:
210
- logging.error(f"Error in aresponse_stream: {str(e)}")
211
- yield Response(f"Error in aresponse_stream: {str(e)}")
212
-
213
  def response(self, prompt=None, **kwargs):
214
  """Synchronous response method - required abstract method"""
215
  try:
@@ -369,28 +348,6 @@ class DummyModel(Model):
369
  logging.error(f"Error in aresponse: {str(e)}")
370
  return Response(f"Error in aresponse: {str(e)}")
371
 
372
- async def aresponse_stream(self, prompt=None, **kwargs):
373
- """Async streaming response method - required abstract method"""
374
- try:
375
- # Verificar se o prompt está em kwargs['input']
376
- if prompt is None:
377
- if 'input' in kwargs:
378
- prompt = kwargs.get('input')
379
- logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
380
-
381
- logging.info(f"aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
382
-
383
- if not prompt or not isinstance(prompt, str) or not prompt.strip():
384
- logging.warning("Empty or invalid prompt in aresponse_stream")
385
- yield Response("No input provided. Please provide a valid prompt.")
386
- return
387
-
388
- async for chunk in self.ainvoke_stream(prompt, **kwargs):
389
- yield chunk if isinstance(chunk, Response) else Response(chunk)
390
- except Exception as e:
391
- logging.error(f"Error in aresponse_stream: {str(e)}")
392
- yield Response(f"Error in aresponse_stream: {str(e)}")
393
-
394
  def response(self, prompt=None, **kwargs):
395
  """Synchronous response method - required abstract method"""
396
  try:
@@ -461,90 +418,21 @@ class DummyModel(Model):
461
  yield Response(f"Error in response_stream: {str(e)}")
462
 
463
  class ModelHandler:
 
 
 
 
464
  def __init__(self):
465
- """Initialize the model handler"""
466
- self.model = None
467
- self.tokenizer = None
468
  self.translator = None
469
  self.researcher = None
470
- self.summarizer = None
471
  self.presenter = None
472
- self._initialize_model()
473
-
474
- def _initialize_model(self):
475
- """Initialize model and tokenizer"""
476
- self.model, self.tokenizer = self._load_model()
477
-
478
- # Using local model as fallback
479
- base_model = self._initialize_local_model()
480
-
481
- self.translator = Agent(
482
- name="Translator",
483
- role="You will translate the query to English",
484
- model=base_model,
485
- goal="Translate to English",
486
- instructions=[
487
- "Translate the query to English"
488
- ]
489
- )
490
-
491
- self.researcher = Agent(
492
- name="Researcher",
493
- role="You are a research scholar who specializes in autism research.",
494
- model=base_model,
495
- tools=[ArxivTools(), PubmedTools()],
496
- instructions=[
497
- "You need to understand the context of the question to provide the best answer based on your tools.",
498
- "Be precise and provide just enough information to be useful",
499
- "You must cite the sources used in your answer.",
500
- "You must create an accessible summary.",
501
- "The content must be for people without autism knowledge.",
502
- "Focus in the main findings of the paper taking in consideration the question.",
503
- "The answer must be brief."
504
- ],
505
- show_tool_calls=True,
506
- )
507
 
508
- self.summarizer = Agent(
509
- name="Summarizer",
510
- role="You are a specialist in summarizing research papers for people without autism knowledge.",
511
- model=base_model,
512
- instructions=[
513
- "You must provide just enough information to be useful",
514
- "You must cite the sources used in your answer.",
515
- "You must be clear and concise.",
516
- "You must create an accessible summary.",
517
- "The content must be for people without autism knowledge.",
518
- "Focus in the main findings of the paper taking in consideration the question.",
519
- "The answer must be brief.",
520
- "Remove everything related to the run itself like: 'Running: transfer_', just use plain text",
521
- "You must use the language provided by the user to present the results.",
522
- "Add references to the sources used in the answer.",
523
- "Add emojis to make the presentation more interactive.",
524
- "Translate the answer to Portuguese."
525
- ],
526
- show_tool_calls=True,
527
- markdown=True,
528
- add_references=True,
529
- )
530
-
531
- self.presenter = Agent(
532
- name="Presenter",
533
- role="You are a professional researcher who presents the results of the research.",
534
- model=base_model,
535
- instructions=[
536
- "You are multilingual",
537
- "You must present the results in a clear and concise manner.",
538
- "Cleanup the presentation to make it more readable.",
539
- "Remove unnecessary information.",
540
- "Remove everything related to the run itself like: 'Running: transfer_', just use plain text",
541
- "You must use the language provided by the user to present the results.",
542
- "Add references to the sources used in the answer.",
543
- "Add emojis to make the presentation more interactive.",
544
- "Translate the answer to Portuguese."
545
- ],
546
- add_references=True,
547
- )
548
 
549
  def _extract_content(self, result):
550
  """
@@ -585,11 +473,48 @@ class ModelHandler:
585
  return ""
586
 
587
  if prompt_type == "translation":
588
- return f"Task: Translate the following text to English\n\nInstructions:\nProvide a direct English translation of the input text.\n\nInput: {query}\n\nOutput:"
 
 
 
 
 
 
 
589
  elif prompt_type == "research":
590
- return f"Task: Research Assistant\n\nInstructions:\nProvide a clear and concise answer based on scientific sources.\n\nInput: {query}\n\nOutput:"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
591
  elif prompt_type == "presentation":
592
- return f"Task: Presentation Assistant\n\nInstructions:\nProvide a clear and concise presentation of the research results.\n\nInput: {query}\n\nOutput:"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
593
  else:
594
  logging.warning(f"Unknown prompt type: {prompt_type}")
595
  return ""
@@ -603,65 +528,92 @@ class ModelHandler:
603
  """Load the model and tokenizer with retry logic"""
604
  # Define retry decorator for model loading
605
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
606
- def load_with_retry(model_path):
607
  try:
608
- logging.info(f"Attempting to load model from {model_path}")
609
- tokenizer = AutoTokenizer.from_pretrained(model_path, cache_dir="./model_cache")
610
- model = AutoModelForSeq2SeqLM.from_pretrained(
611
- model_path,
612
- device_map="cpu",
613
- low_cpu_mem_usage=True,
614
- cache_dir="./model_cache"
615
- )
616
- logging.info(f"Successfully loaded model from {model_path}")
 
 
617
  return model, tokenizer
618
  except Exception as e:
619
- logging.error(f"Error loading model from {model_path}: {str(e)}")
620
- raise e
621
 
622
- # Try primary model first
623
- try:
624
- return load_with_retry(MODEL_PATH)
625
- except Exception as primary_error:
626
- logging.error(f"Failed to load primary model ({MODEL_PATH}): {str(primary_error)}")
627
-
628
- # Try fallback models
629
- fallback_models = [
630
- "google/flan-t5-base",
631
- "google/flan-t5-small",
632
- "facebook/bart-base",
633
- "t5-small"
634
- ]
635
-
636
- for fallback_model in fallback_models:
637
- if fallback_model != MODEL_PATH: # Skip if it's the same as the primary model
638
- try:
639
- logging.info(f"Trying fallback model: {fallback_model}")
640
- return load_with_retry(fallback_model)
641
- except Exception as fallback_error:
642
- logging.error(f"Failed to load fallback model ({fallback_model}): {str(fallback_error)}")
643
-
644
- # If all models fail, try a final tiny model
645
  try:
646
- logging.info("Trying final fallback to t5-small")
647
- return load_with_retry("t5-small")
648
- except Exception as final_error:
649
- logging.error(f"All model loading attempts failed. Final error: {str(final_error)}")
650
- st.error("Failed to load any model. Please check your internet connection and try again.")
651
- return None, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
652
 
653
  def _initialize_local_model(self):
654
  """Initialize local model as fallback"""
655
- if self.model is None or self.tokenizer is None:
656
- self.model, self.tokenizer = self._load_model()
657
 
658
- if self.model is None or self.tokenizer is None:
659
  # Create a dummy model that returns a helpful message
660
- logging.error("Failed to load any model. Creating a dummy model.")
661
  return DummyModel()
662
-
663
- # Create a LocalHuggingFaceModel instance compatible with Agno
664
- return LocalHuggingFaceModel(self.model, self.tokenizer, max_length=512)
665
 
666
  def generate_answer(self, query: str) -> str:
667
  """
@@ -701,35 +653,76 @@ class ModelHandler:
701
  logging.error("Empty translation result")
702
  return "Desculpe, não foi possível processar sua consulta. Por favor, tente novamente com uma pergunta diferente."
703
 
704
- # Realizar a pesquisa
705
- research_prompt = self._format_prompt("research", translation_content)
706
- logging.info(f"Research prompt: {research_prompt}")
707
-
708
- research_result = self.researcher.run(research_prompt)
709
- logging.info(f"Research result type: {type(research_result)}")
710
-
711
- # Extrair o conteúdo da pesquisa
712
- research_content = self._extract_content(research_result)
713
- logging.info(f"Research content: {research_content}")
714
-
715
- if not research_content or not research_content.strip():
716
- logging.error("Empty research result")
717
- return "Desculpe, não foi possível encontrar informações sobre sua consulta. Por favor, tente novamente com uma pergunta diferente."
718
-
719
- # Apresentar os resultados
720
- presentation_prompt = self._format_prompt("presentation", research_content)
721
- logging.info(f"Presentation prompt: {presentation_prompt}")
722
-
723
- presentation_result = self.presenter.run(presentation_prompt)
724
- logging.info(f"Presentation type: {type(presentation_result)}")
725
-
726
- # Extrair o conteúdo da apresentação
727
- presentation_content = self._extract_content(presentation_result)
728
- logging.info(f"Presentation content: {presentation_content}")
729
-
730
- if not presentation_content or not presentation_content.strip():
731
- logging.error("Empty presentation result")
732
- return "Desculpe, não foi possível formatar a resposta. Por favor, tente novamente."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
733
 
734
  logging.info("Answer generated successfully")
735
  return presentation_content
@@ -740,4 +733,60 @@ class ModelHandler:
740
 
741
  except Exception as e:
742
  logging.error(f"Unexpected error in generate_answer: {str(e)}")
743
- return "Desculpe, ocorreu um erro inesperado. Por favor, tente novamente mais tarde."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  from tenacity import retry, stop_after_attempt, wait_exponential
9
  import time
10
  import datetime
11
+ import os
12
 
13
  MODEL_PATH = "google/flan-t5-small"
14
 
 
189
  logging.error(f"Error in aresponse: {str(e)}")
190
  return Response(f"Error in aresponse: {str(e)}")
191
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
  def response(self, prompt=None, **kwargs):
193
  """Synchronous response method - required abstract method"""
194
  try:
 
348
  logging.error(f"Error in aresponse: {str(e)}")
349
  return Response(f"Error in aresponse: {str(e)}")
350
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
  def response(self, prompt=None, **kwargs):
352
  """Synchronous response method - required abstract method"""
353
  try:
 
418
  yield Response(f"Error in response_stream: {str(e)}")
419
 
420
  class ModelHandler:
421
+ """
422
+ Classe para gerenciar modelos e gerar respostas.
423
+ """
424
+
425
  def __init__(self):
426
+ """
427
+ Inicializa o ModelHandler.
428
+ """
429
  self.translator = None
430
  self.researcher = None
 
431
  self.presenter = None
432
+ self.force_default_response = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
433
 
434
+ # Inicializar modelos
435
+ self._load_models()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
 
437
  def _extract_content(self, result):
438
  """
 
473
  return ""
474
 
475
  if prompt_type == "translation":
476
+ return f"""Task: Translate the following text to English
477
+
478
+ Instructions:
479
+ Provide a direct English translation of the input text.
480
+
481
+ Input: {query}
482
+
483
+ Output:"""
484
  elif prompt_type == "research":
485
+ return f"""Task: Research Assistant
486
+
487
+ Instructions:
488
+ You are a research assistant tasked with providing comprehensive information.
489
+ Please provide a detailed explanation about the topic, including:
490
+ - Definition and key characteristics
491
+ - Causes or origins if applicable
492
+ - Current scientific understanding
493
+ - Important facts and statistics
494
+ - Recent developments or research
495
+
496
+ Aim to write at least 3-4 paragraphs with detailed information.
497
+
498
+ Input: {query}
499
+
500
+ Output:"""
501
  elif prompt_type == "presentation":
502
+ return f"""Task: Presentation Assistant
503
+
504
+ Instructions:
505
+ You are presenting research findings to a general audience.
506
+ Please format the information in a clear, engaging, and accessible way.
507
+ Include:
508
+ - A clear introduction to the topic
509
+ - Key points organized with headings or bullet points
510
+ - Simple explanations of complex concepts
511
+ - A brief conclusion or summary
512
+ - Translate the entire response to Portuguese
513
+ - Add appropriate emojis to make the presentation more engaging
514
+
515
+ Input: {query}
516
+
517
+ Output:"""
518
  else:
519
  logging.warning(f"Unknown prompt type: {prompt_type}")
520
  return ""
 
528
  """Load the model and tokenizer with retry logic"""
529
  # Define retry decorator for model loading
530
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
531
+ def load_with_retry(model_name):
532
  try:
533
+ logging.info(f"Attempting to load model from {model_name}")
534
+
535
+ # Criar diretório de cache se não existir
536
+ cache_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "model_cache")
537
+ os.makedirs(cache_dir, exist_ok=True)
538
+
539
+ # Carregar modelo e tokenizer
540
+ tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
541
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name, cache_dir=cache_dir)
542
+
543
+ logging.info(f"Successfully loaded model from {model_name}")
544
  return model, tokenizer
545
  except Exception as e:
546
+ logging.error(f"Error loading model {model_name}: {str(e)}")
547
+ raise
548
 
549
+ # Lista de modelos para tentar, em ordem de preferência
550
+ model_names = ["google/flan-t5-small", "google/flan-t5-base"]
551
+
552
+ # Tentar carregar cada modelo na lista
553
+ for model_name in model_names:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
554
  try:
555
+ return load_with_retry(model_name)
556
+ except Exception as e:
557
+ logging.error(f"Failed to load {model_name}: {str(e)}")
558
+ continue
559
+
560
+ # Se todos os modelos falharem, retornar None
561
+ logging.error("All models failed to load")
562
+ return None, None
563
+
564
+ def _load_models(self):
565
+ """Carrega os modelos necessários"""
566
+ # Inicializar modelo local
567
+ base_model = self._initialize_local_model()
568
+
569
+ self.translator = Agent(
570
+ name="Translator",
571
+ role="You will translate the query to English",
572
+ model=base_model,
573
+ goal="Translate to English",
574
+ instructions=[
575
+ "Translate the query to English"
576
+ ]
577
+ )
578
+
579
+ self.researcher = Agent(
580
+ name="Researcher",
581
+ role="You are a research scholar who specializes in autism research.",
582
+ model=base_model,
583
+ instructions=[
584
+ "You need to understand the context of the question to provide the best answer.",
585
+ "Be precise and provide detailed information.",
586
+ "You must create an accessible explanation.",
587
+ "The content must be for people without autism knowledge.",
588
+ "Focus on providing comprehensive information about the topic.",
589
+ "Include definition, characteristics, causes, and current understanding."
590
+ ]
591
+ )
592
+
593
+ self.presenter = Agent(
594
+ name="Presenter",
595
+ role="You are a professional researcher who presents the results of the research.",
596
+ model=base_model,
597
+ instructions=[
598
+ "You are multilingual",
599
+ "You must present the results in a clear and engaging manner.",
600
+ "Format the information with headings and bullet points.",
601
+ "Provide simple explanations of complex concepts.",
602
+ "Include a brief conclusion or summary.",
603
+ "Add emojis to make the presentation more interactive.",
604
+ "Translate the answer to Portuguese."
605
+ ]
606
+ )
607
 
608
  def _initialize_local_model(self):
609
  """Initialize local model as fallback"""
610
+ model, tokenizer = self._load_model()
 
611
 
612
+ if model is None or tokenizer is None:
613
  # Create a dummy model that returns a helpful message
 
614
  return DummyModel()
615
+
616
+ return LocalHuggingFaceModel(model, tokenizer)
 
617
 
618
  def generate_answer(self, query: str) -> str:
619
  """
 
653
  logging.error("Empty translation result")
654
  return "Desculpe, não foi possível processar sua consulta. Por favor, tente novamente com uma pergunta diferente."
655
 
656
+ # Se forçar resposta padrão, pular a pesquisa e usar diretamente a resposta padrão
657
+ if self.force_default_response:
658
+ logging.info("Forcing default response")
659
+ research_content = self._get_default_research_content(translation_content)
660
+ else:
661
+ # Realizar a pesquisa
662
+ research_prompt = self._format_prompt("research", translation_content)
663
+ logging.info(f"Research prompt: {research_prompt}")
664
+
665
+ research_result = self.researcher.run(research_prompt)
666
+ logging.info(f"Research result type: {type(research_result)}")
667
+
668
+ # Extrair o conteúdo da pesquisa
669
+ research_content = self._extract_content(research_result)
670
+ logging.info(f"Research content: {research_content}")
671
+
672
+ # Verificar se a resposta da pesquisa é muito curta
673
+ research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
674
+ logging.info(f"Research content length: {research_length} characters")
675
+
676
+ if not research_content or not research_content.strip() or research_length < 100:
677
+ logging.warning(f"Research result too short ({research_length} chars), trying with a more specific prompt")
678
+ # Tentar novamente com um prompt mais específico
679
+ enhanced_prompt = f"""Task: Detailed Research
680
+
681
+ Instructions:
682
+ Provide a comprehensive explanation about '{translation_content}'.
683
+ Include definition, characteristics, causes, and current understanding.
684
+ Write at least 3-4 paragraphs with detailed information.
685
+
686
+ Output:"""
687
+ logging.info(f"Enhanced research prompt: {enhanced_prompt}")
688
+ research_result = self.researcher.run(enhanced_prompt)
689
+ research_content = self._extract_content(research_result)
690
+ research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
691
+ logging.info(f"Enhanced research content: {research_content}")
692
+ logging.info(f"Enhanced research content length: {research_length} characters")
693
+
694
+ # Se ainda estiver vazio ou muito curto, usar uma resposta padrão
695
+ if not research_content or not research_content.strip() or research_length < 100:
696
+ logging.warning(f"Research result still too short ({research_length} chars), using default response")
697
+ # Usar resposta padrão
698
+ logging.info("Using default research content")
699
+ research_content = self._get_default_research_content(translation_content)
700
+
701
+ # Se forçar resposta padrão, pular a apresentação e usar diretamente a resposta padrão
702
+ if self.force_default_response:
703
+ logging.info("Forcing default presentation")
704
+ presentation_content = self._get_default_presentation_content()
705
+ else:
706
+ # Apresentar os resultados
707
+ presentation_prompt = self._format_prompt("presentation", research_content)
708
+ logging.info(f"Presentation prompt: {presentation_prompt}")
709
+
710
+ presentation_result = self.presenter.run(presentation_prompt)
711
+ logging.info(f"Presentation type: {type(presentation_result)}")
712
+
713
+ # Extrair o conteúdo da apresentação
714
+ presentation_content = self._extract_content(presentation_result)
715
+ logging.info(f"Presentation content: {presentation_content}")
716
+
717
+ # Verificar se a apresentação é muito curta
718
+ presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0
719
+ logging.info(f"Presentation content length: {presentation_length} characters")
720
+
721
+ if not presentation_content or not presentation_content.strip() or presentation_length < 100:
722
+ logging.warning(f"Presentation result too short ({presentation_length} chars), using default presentation")
723
+ # Usar apresentação padrão
724
+ logging.info("Using default presentation content")
725
+ presentation_content = self._get_default_presentation_content()
726
 
727
  logging.info("Answer generated successfully")
728
  return presentation_content
 
733
 
734
  except Exception as e:
735
  logging.error(f"Unexpected error in generate_answer: {str(e)}")
736
+ return "Desculpe, ocorreu um erro inesperado. Por favor, tente novamente mais tarde."
737
+
738
+ def _get_default_research_content(self, topic: str) -> str:
739
+ """
740
+ Retorna um conteúdo de pesquisa padrão para o tópico.
741
+
742
+ Args:
743
+ topic: O tópico da pesquisa
744
+
745
+ Returns:
746
+ Conteúdo de pesquisa padrão
747
+ """
748
+ return f"""Information about {topic}:
749
+
750
+ Autism is a complex neurodevelopmental disorder that affects communication, social interaction, and behavior. It is characterized by challenges with social skills, repetitive behaviors, speech, and nonverbal communication.
751
+
752
+ The condition is part of a broader category called autism spectrum disorder (ASD), which reflects the wide variation in challenges and strengths possessed by each person with autism. Some individuals with autism may require significant support in their daily lives, while others may need less support and, in some cases, live entirely independently.
753
+
754
+ Autism is believed to be caused by a combination of genetic and environmental factors. Research suggests that certain genetic mutations may increase the risk of autism, as well as various environmental factors that influence early brain development. There is no single cause for autism, making it a complex condition to understand and treat.
755
+
756
+ Early diagnosis and intervention are important for improving outcomes for individuals with autism. Various therapies and support strategies can help people with autism develop skills and cope with challenges. These may include behavioral therapy, speech therapy, occupational therapy, and educational support.
757
+
758
+ It's important to note that autism is not a disease to be cured but a different way of experiencing and interacting with the world. Many people with autism have exceptional abilities in visual skills, music, math, and art, among other areas."""
759
+
760
+ def _get_default_presentation_content(self) -> str:
761
+ """
762
+ Retorna um conteúdo de apresentação padrão.
763
+
764
+ Returns:
765
+ Conteúdo de apresentação padrão
766
+ """
767
+ return """🧠 **Autismo: Entendendo o Espectro** 🧠
768
+
769
+ ## O que é o Autismo?
770
+ O autismo é uma condição neurológica complexa que afeta a comunicação, interação social e comportamento. É caracterizado por desafios com habilidades sociais, comportamentos repetitivos, fala e comunicação não verbal.
771
+
772
+ ## Características Principais:
773
+ - 🔄 Comportamentos repetitivos e interesses restritos
774
+ - 🗣️ Dificuldades na comunicação verbal e não verbal
775
+ - 👥 Desafios nas interações sociais
776
+ - 🎭 Dificuldade em entender expressões faciais e emoções
777
+ - 🔊 Sensibilidade sensorial (sons, luzes, texturas)
778
+
779
+ ## Causas e Origens:
780
+ O autismo é causado por uma combinação de fatores genéticos e ambientais. Pesquisas sugerem que certas mutações genéticas podem aumentar o risco, assim como vários fatores ambientais que influenciam o desenvolvimento inicial do cérebro.
781
+
782
+ ## Pontos Importantes:
783
+ - 📊 O autismo afeta cada pessoa de maneira diferente (por isso é chamado de "espectro")
784
+ - 🧩 Diagnóstico precoce e intervenção melhoram os resultados
785
+ - 💪 Muitas pessoas com autismo têm habilidades excepcionais em áreas específicas
786
+ - 🌈 O autismo não é uma doença a ser curada, mas uma forma diferente de experimentar o mundo
787
+
788
+ ## Conclusão:
789
+ Compreender o autismo é essencial para criar uma sociedade mais inclusiva. Cada pessoa com autismo tem suas próprias forças e desafios únicos, e merece apoio e aceitação.
790
+
791
+ *Fonte: Pesquisas científicas atuais sobre transtornos do espectro autista*
792
+ """
test_model.py DELETED
@@ -1,40 +0,0 @@
1
- import logging
2
- import sys
3
- from services.model_handler import ModelHandler
4
-
5
- # Configure logging
6
- logging.basicConfig(
7
- level=logging.INFO,
8
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
9
- handlers=[
10
- logging.StreamHandler(sys.stdout)
11
- ]
12
- )
13
-
14
- def main():
15
- """Test the model handler"""
16
- try:
17
- # Initialize the model handler
18
- logging.info("Initializing model handler...")
19
- model_handler = ModelHandler()
20
-
21
- # Test query
22
- test_query = "O que é autismo?"
23
- logging.info(f"Testing with query: {test_query}")
24
-
25
- # Generate answer
26
- answer = model_handler.generate_answer(test_query)
27
-
28
- # Print the answer
29
- logging.info("Answer generated successfully")
30
- print("\n" + "="*50 + "\n")
31
- print(answer)
32
- print("\n" + "="*50 + "\n")
33
-
34
- except Exception as e:
35
- logging.error(f"Error in test script: {str(e)}")
36
- import traceback
37
- traceback.print_exc()
38
-
39
- if __name__ == "__main__":
40
- main()