add tools
- app.py +7 -0
- services/model_handler.py +33 -15
app.py
CHANGED
@@ -25,12 +25,19 @@ class AutismResearchApp:

    async def run(self):
        """Run the main application loop"""
+
        self._setup_streamlit()

        # Initialize session state for papers
        if 'papers' not in st.session_state:
            st.session_state.papers = []

+        # Carregar modelos assincronamente
+        with st.status("Carregando modelos...") as status:
+            status.write("🔄 Inicializando modelos de linguagem...")
+            await self.model_handler._load_models_async()
+            status.write("✅ Modelos carregados com sucesso!")
+
        # Get user query
        col1, col2 = st.columns(2, vertical_alignment="bottom", gap="small")

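The awaited `_load_models_async()` call only works because `run()` is itself a coroutine, so something outside this hunk has to drive it. A minimal sketch of such an entrypoint; the `main()` wiring and zero-argument constructor below are assumptions, since the rest of app.py is not part of this diff:

```python
import asyncio

def main() -> None:
    # AutismResearchApp refers to the class named in the hunk header;
    # its constructor arguments are not shown in this commit.
    app = AutismResearchApp()
    # Streamlit re-executes the script top to bottom on each interaction;
    # asyncio.run drives the async run() coroutine, including the awaited
    # _load_models_async() call inside the st.status block above.
    asyncio.run(app.run())

if __name__ == "__main__":
    main()
```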
services/model_handler.py
CHANGED
@@ -106,6 +106,19 @@ class LocalHuggingFaceModel(Model):
            logging.error(f"[{self.model_name}] Error in ainvoke_stream: {str(e)}")
            yield Response(f"Error in ainvoke_stream: {str(e)}")

+    async def aresponse_stream(self, prompt: str, **kwargs):
+        """
+        Método abstrato necessário para implementar a interface Model da biblioteca agno.
+        Este método deve retornar um gerador assíncrono de objetos Response.
+        """
+        try:
+            logging.info(f"[{self.model_name}] aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
+            result = self.invoke(prompt, **kwargs)
+            yield result
+        except Exception as e:
+            logging.error(f"[{self.model_name}] Error in aresponse_stream: {str(e)}")
+            yield Response(f"Error in aresponse_stream: {str(e)}")
+
    def invoke(self, prompt: str, **kwargs) -> str:
        """Synchronous invoke method"""
        try:
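Because the new `aresponse_stream` uses `yield`, it is an async generator, so callers consume it with `async for` rather than awaiting it directly. A minimal usage sketch, assuming an already-constructed `LocalHuggingFaceModel` instance (constructor arguments are not shown in this diff):

```python
import asyncio

async def stream_answer(model, prompt: str) -> None:
    # aresponse_stream yields a single item here (the result of invoke),
    # but the async-for loop would work unchanged if it later yielded
    # multiple chunks.
    async for chunk in model.aresponse_stream(prompt):
        print(chunk)

# Example (hypothetical): asyncio.run(stream_answer(model, "Summarize recent autism research"))
```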
@@ -320,8 +333,8 @@ class ModelHandler:
        self.force_default_response = False
        self.models = {}

-        #
-
+        # Os modelos serão carregados posteriormente de forma assíncrona
+        logging.info("ModelHandler initialized. Models will be loaded asynchronously.")

    def _extract_content(self, result):
        """
@@ -345,6 +358,14 @@
            logging.error(f"Error extracting content: {str(e)}")
            return ""

+    async def _load_models_async(self):
+        """
+        Carrega os modelos de forma assíncrona.
+        """
+        logging.info("Loading models asynchronously...")
+        self._load_models()
+        logging.info("Models loaded asynchronously")
+
    def _format_prompt(self, prompt_type, content):
        """
        Formata o prompt de acordo com o tipo.
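Note that `_load_models_async` simply calls the synchronous `_load_models()`, so the event loop still blocks while the models load; the `async` wrapper mainly lets app.py await it inside `st.status`. If non-blocking loading were ever needed, one option (not what this commit does) is to push the blocking call onto a worker thread. A sketch assuming Python 3.9+ and that `_load_models` is safe to run off the main thread:

```python
import asyncio
import logging

async def load_models_async(handler) -> None:
    # Alternative sketch: run the blocking _load_models() in a worker thread
    # so the asyncio event loop (and the Streamlit status spinner) stays responsive.
    logging.info("Loading models asynchronously...")
    await asyncio.to_thread(handler._load_models)
    logging.info("Models loaded asynchronously")
```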
@@ -706,9 +727,8 @@ Para informações mais detalhadas e específicas, recomendamos consultar litera
        """
        try:
            logging.info(f"Running agent {agent.name} with tools")
-            # O método arun retorna um
-
-            result = await response_generator.__anext__()
+            # O método arun retorna um coroutine que precisa ser awaited
+            result = await agent.arun(prompt, max_steps=max_steps)
            logging.info(f"Agent {agent.name} execution complete")
            return result
        except Exception as e:
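The substance of this fix is how the return value of `agent.arun` is treated: the old code pulled one item from it with `__anext__()` as if it were an async generator, while the new code awaits it as a coroutine. A small self-contained illustration of that difference in plain Python (generic asyncio, not the agno API):

```python
import asyncio

async def coro() -> str:
    return "done"        # a coroutine: await it to get the value

async def agen():
    yield "chunk"        # an async generator: iterate it instead of awaiting it

async def main() -> None:
    value = await coro()                # what the new code does
    first = await agen().__anext__()    # what the old code did; only valid for async generators
    print(value, first)

asyncio.run(main())
```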
@@ -742,13 +762,12 @@ Para informações mais detalhadas e específicas, recomendamos consultar litera
        logging.info(f"Translation prompt: {translation_prompt}")

        try:
-            # O método arun retorna um
-
-
-            logging.info(f"Translation result type: {type(translation_result)}")
+            # O método arun retorna um coroutine que precisa ser awaited
+            result = await self.translator.arun(translation_prompt)
+            logging.info(f"Translation result type: {type(result)}")

            # Extrair o conteúdo da resposta
-            translation_content = self._extract_content(
+            translation_content = self._extract_content(result)
            logging.info(f"Translation content: {translation_content}")

            if not translation_content or not translation_content.strip():
@@ -800,12 +819,11 @@ Para informações mais detalhadas e específicas, recomendamos consultar litera
        presentation_prompt = self._format_prompt("presentation", research_content)
        logging.info(f"Presentation prompt: {presentation_prompt}")

-        # O método arun retorna um
-
-
-        logging.info(f"Presentation type: {type(presentation_result)}")
+        # O método arun retorna um coroutine que precisa ser awaited
+        result = await self.presenter.arun(presentation_prompt)
+        logging.info(f"Presentation type: {type(result)}")

-        presentation_content = self._extract_content(
+        presentation_content = self._extract_content(result)
        logging.info(f"Presentation content: {presentation_content}")

        presentation_length = len(presentation_content.strip()) if presentation_content and isinstance(presentation_content, str) else 0