Spaces:
Sleeping
Sleeping
pass prompt
Browse files- app.py +2 -5
- services/model_handler.py +32 -30
- test_model.py +46 -0
app.py
CHANGED
@@ -35,9 +35,6 @@ class AutismResearchApp:
|
|
35 |
|
36 |
query = col1.text_input("O que você precisa saber?")
|
37 |
|
38 |
-
# Adicionar opção para usar resposta padrão
|
39 |
-
use_default = st.checkbox("Usar resposta padrão (recomendado para melhor qualidade)", value=True)
|
40 |
-
|
41 |
if col2.button("Enviar"):
|
42 |
if not query:
|
43 |
st.error("Por favor, digite uma pergunta.")
|
@@ -49,8 +46,8 @@ class AutismResearchApp:
|
|
49 |
status.write("📚 Analisando dados...")
|
50 |
status.write("✍️ Gerando resposta...")
|
51 |
|
52 |
-
#
|
53 |
-
self.model_handler.force_default_response =
|
54 |
|
55 |
answer = self.model_handler.generate_answer(query)
|
56 |
|
|
|
35 |
|
36 |
query = col1.text_input("O que você precisa saber?")
|
37 |
|
|
|
|
|
|
|
38 |
if col2.button("Enviar"):
|
39 |
if not query:
|
40 |
st.error("Por favor, digite uma pergunta.")
|
|
|
46 |
status.write("📚 Analisando dados...")
|
47 |
status.write("✍️ Gerando resposta...")
|
48 |
|
49 |
+
# Sempre usar o modelo, nunca a resposta padrão
|
50 |
+
self.model_handler.force_default_response = False
|
51 |
|
52 |
answer = self.model_handler.generate_answer(query)
|
53 |
|
services/model_handler.py
CHANGED
@@ -456,33 +456,32 @@ class ModelHandler:
|
|
456 |
logging.error(f"Error extracting content: {str(e)}")
|
457 |
return ""
|
458 |
|
459 |
-
def _format_prompt(self, prompt_type,
|
460 |
"""
|
461 |
-
Formata
|
462 |
|
463 |
Args:
|
464 |
prompt_type: O tipo de prompt (translation, research, presentation)
|
465 |
-
|
466 |
|
467 |
Returns:
|
468 |
-
|
469 |
"""
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
|
475 |
-
|
476 |
-
return f"""Task: Translate the following text to English
|
477 |
|
478 |
Instructions:
|
479 |
Provide a direct English translation of the input text.
|
480 |
|
481 |
-
Input: {
|
482 |
|
483 |
Output:"""
|
484 |
-
|
485 |
-
|
486 |
|
487 |
Instructions:
|
488 |
You are a research assistant tasked with providing comprehensive information.
|
@@ -492,35 +491,36 @@ Please provide a detailed explanation about the topic, including:
|
|
492 |
- Current scientific understanding
|
493 |
- Important facts and statistics
|
494 |
- Recent developments or research
|
|
|
495 |
|
496 |
-
Aim to write at least
|
|
|
|
|
497 |
|
498 |
-
Input: {
|
499 |
|
500 |
Output:"""
|
501 |
-
|
502 |
-
|
503 |
|
504 |
Instructions:
|
505 |
You are presenting research findings to a general audience.
|
506 |
Please format the information in a clear, engaging, and accessible way.
|
507 |
Include:
|
508 |
-
- A clear introduction to the topic
|
509 |
- Key points organized with headings or bullet points
|
510 |
- Simple explanations of complex concepts
|
511 |
- A brief conclusion or summary
|
512 |
- Translate the entire response to Portuguese
|
513 |
- Add appropriate emojis to make the presentation more engaging
|
|
|
514 |
|
515 |
-
Input: {
|
516 |
|
517 |
Output:"""
|
518 |
-
|
519 |
-
|
520 |
-
|
521 |
-
except Exception as e:
|
522 |
-
logging.error(f"Error formatting prompt: {str(e)}")
|
523 |
-
return ""
|
524 |
|
525 |
@staticmethod
|
526 |
@st.cache_resource
|
@@ -673,7 +673,7 @@ Output:"""
|
|
673 |
research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
|
674 |
logging.info(f"Research content length: {research_length} characters")
|
675 |
|
676 |
-
if not research_content or not research_content.strip() or research_length <
|
677 |
logging.warning(f"Research result too short ({research_length} chars), trying with a more specific prompt")
|
678 |
# Tentar novamente com um prompt mais específico
|
679 |
enhanced_prompt = f"""Task: Detailed Research
|
@@ -681,7 +681,9 @@ Output:"""
|
|
681 |
Instructions:
|
682 |
Provide a comprehensive explanation about '{translation_content}'.
|
683 |
Include definition, characteristics, causes, and current understanding.
|
684 |
-
Write at least
|
|
|
|
|
685 |
|
686 |
Output:"""
|
687 |
logging.info(f"Enhanced research prompt: {enhanced_prompt}")
|
@@ -692,7 +694,7 @@ Output:"""
|
|
692 |
logging.info(f"Enhanced research content length: {research_length} characters")
|
693 |
|
694 |
# Se ainda estiver vazio ou muito curto, usar uma resposta padrão
|
695 |
-
if not research_content or not research_content.strip() or research_length <
|
696 |
logging.warning(f"Research result still too short ({research_length} chars), using default response")
|
697 |
# Usar resposta padrão
|
698 |
logging.info("Using default research content")
|
@@ -718,7 +720,7 @@ Output:"""
|
|
718 |
presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0
|
719 |
logging.info(f"Presentation content length: {presentation_length} characters")
|
720 |
|
721 |
-
if not presentation_content or not presentation_content.strip() or presentation_length <
|
722 |
logging.warning(f"Presentation result too short ({presentation_length} chars), using default presentation")
|
723 |
# Usar apresentação padrão
|
724 |
logging.info("Using default presentation content")
|
|
|
456 |
logging.error(f"Error extracting content: {str(e)}")
|
457 |
return ""
|
458 |
|
459 |
+
def _format_prompt(self, prompt_type, content):
|
460 |
"""
|
461 |
+
Formata o prompt de acordo com o tipo.
|
462 |
|
463 |
Args:
|
464 |
prompt_type: O tipo de prompt (translation, research, presentation)
|
465 |
+
content: O conteúdo a ser incluído no prompt
|
466 |
|
467 |
Returns:
|
468 |
+
O prompt formatado
|
469 |
"""
|
470 |
+
if not content or not content.strip():
|
471 |
+
logging.warning(f"Empty content provided to _format_prompt for {prompt_type}")
|
472 |
+
return "No input provided."
|
473 |
+
|
474 |
+
if prompt_type == "translation":
|
475 |
+
return f"""Task: Translate the following text to English
|
|
|
476 |
|
477 |
Instructions:
|
478 |
Provide a direct English translation of the input text.
|
479 |
|
480 |
+
Input: {content}
|
481 |
|
482 |
Output:"""
|
483 |
+
elif prompt_type == "research":
|
484 |
+
return f"""Task: Research Assistant
|
485 |
|
486 |
Instructions:
|
487 |
You are a research assistant tasked with providing comprehensive information.
|
|
|
491 |
- Current scientific understanding
|
492 |
- Important facts and statistics
|
493 |
- Recent developments or research
|
494 |
+
- Real-world implications and applications
|
495 |
|
496 |
+
Aim to write at least 4-5 paragraphs with detailed information.
|
497 |
+
Be thorough and informative, covering all important aspects of the topic.
|
498 |
+
Use clear and accessible language suitable for a general audience.
|
499 |
|
500 |
+
Input: {content}
|
501 |
|
502 |
Output:"""
|
503 |
+
elif prompt_type == "presentation":
|
504 |
+
return f"""Task: Presentation Assistant
|
505 |
|
506 |
Instructions:
|
507 |
You are presenting research findings to a general audience.
|
508 |
Please format the information in a clear, engaging, and accessible way.
|
509 |
Include:
|
510 |
+
- A clear introduction to the topic with a compelling title
|
511 |
- Key points organized with headings or bullet points
|
512 |
- Simple explanations of complex concepts
|
513 |
- A brief conclusion or summary
|
514 |
- Translate the entire response to Portuguese
|
515 |
- Add appropriate emojis to make the presentation more engaging
|
516 |
+
- Format the text using markdown for better readability
|
517 |
|
518 |
+
Input: {content}
|
519 |
|
520 |
Output:"""
|
521 |
+
else:
|
522 |
+
logging.error(f"Unknown prompt type: {prompt_type}")
|
523 |
+
return f"Unknown prompt type: {prompt_type}"
|
|
|
|
|
|
|
524 |
|
525 |
@staticmethod
|
526 |
@st.cache_resource
|
|
|
673 |
research_length = len(research_content.strip()) if research_content and isinstance(research_content, str) else 0
|
674 |
logging.info(f"Research content length: {research_length} characters")
|
675 |
|
676 |
+
if not research_content or not research_content.strip() or research_length < 150:
|
677 |
logging.warning(f"Research result too short ({research_length} chars), trying with a more specific prompt")
|
678 |
# Tentar novamente com um prompt mais específico
|
679 |
enhanced_prompt = f"""Task: Detailed Research
|
|
|
681 |
Instructions:
|
682 |
Provide a comprehensive explanation about '{translation_content}'.
|
683 |
Include definition, characteristics, causes, and current understanding.
|
684 |
+
Write at least 4-5 paragraphs with detailed information.
|
685 |
+
Be thorough and informative, covering all important aspects of the topic.
|
686 |
+
Use clear and accessible language suitable for a general audience.
|
687 |
|
688 |
Output:"""
|
689 |
logging.info(f"Enhanced research prompt: {enhanced_prompt}")
|
|
|
694 |
logging.info(f"Enhanced research content length: {research_length} characters")
|
695 |
|
696 |
# Se ainda estiver vazio ou muito curto, usar uma resposta padrão
|
697 |
+
if not research_content or not research_content.strip() or research_length < 150:
|
698 |
logging.warning(f"Research result still too short ({research_length} chars), using default response")
|
699 |
# Usar resposta padrão
|
700 |
logging.info("Using default research content")
|
|
|
720 |
presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0
|
721 |
logging.info(f"Presentation content length: {presentation_length} characters")
|
722 |
|
723 |
+
if not presentation_content or not presentation_content.strip() or presentation_length < 150:
|
724 |
logging.warning(f"Presentation result too short ({presentation_length} chars), using default presentation")
|
725 |
# Usar apresentação padrão
|
726 |
logging.info("Using default presentation content")
|
test_model.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import sys
|
3 |
+
from services.model_handler import ModelHandler
|
4 |
+
|
5 |
+
# Configure root logging once at import time: INFO level, with timestamp,
# logger name, severity and message — matches the format used by the services.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
|
10 |
+
|
11 |
+
def test_model(force_default=False):
    """
    Smoke-test the ModelHandler with a sample query.

    Args:
        force_default: Whether to force the default (canned) response
            instead of the model-generated one.

    Returns:
        The answer string produced by the handler, so callers can inspect
        it programmatically instead of relying on stdout only.
    """
    logging.info("Initializing model handler...")
    model_handler = ModelHandler()

    # Fixed sample query for the smoke test (Portuguese: "What is autism?")
    test_query = "O que é autismo?"
    # Lazy %-style args: the message is only built if INFO is enabled
    logging.info("Testing with query: %s", test_query)

    # Toggle between the real model path and the default-response path
    model_handler.force_default_response = force_default

    answer = model_handler.generate_answer(test_query)

    # Print the answer between separators for easy visual inspection;
    # hoist the separator so both prints stay identical.
    separator = "\n" + "=" * 50 + "\n"
    print(separator)
    if force_default:
        print("RESPOSTA PADRÃO FORÇADA:")
    else:
        print("RESPOSTA DO MODELO:")
    print(answer)
    print(separator)
    return answer
|
39 |
+
|
40 |
+
if __name__ == "__main__":
    # Test with the model-generated response
    test_model(force_default=False)

    # Test again with the default response forced
    logging.info("Testing with forced default response...")
    test_model(force_default=True)
|