wakeupmh committed on
Commit
1c95026
·
1 Parent(s): 046f1bb

pass prompt

Browse files
Files changed (2) hide show
  1. services/model_handler.py +366 -239
  2. test_model.py +40 -0
services/model_handler.py CHANGED
@@ -67,23 +67,41 @@ class LocalHuggingFaceModel(Model):
67
 
68
  async def ainvoke(self, prompt: str, **kwargs) -> str:
69
  """Async invoke method"""
70
- return await self.invoke(prompt=prompt, **kwargs)
 
 
 
 
 
71
 
72
  async def ainvoke_stream(self, prompt: str, **kwargs):
73
  """Async streaming invoke method"""
74
- result = await self.invoke(prompt=prompt, **kwargs)
75
- yield result
 
 
 
 
 
76
 
77
  def invoke(self, prompt: str, **kwargs) -> str:
78
  """Synchronous invoke method"""
79
  try:
80
- logging.info(f"Invoking model with prompt: {prompt[:100] if prompt else 'None'}...")
81
 
82
  # Check if prompt is None or empty
83
  if prompt is None:
84
  logging.warning("None prompt provided to invoke method")
85
  return Response("No input provided. Please provide a valid prompt.")
86
 
 
 
 
 
 
 
 
 
87
  if not prompt.strip():
88
  logging.warning("Empty prompt provided to invoke method")
89
  return Response("No input provided. Please provide a non-empty prompt.")
@@ -120,8 +138,13 @@ class LocalHuggingFaceModel(Model):
120
 
121
  def invoke_stream(self, prompt: str, **kwargs):
122
  """Synchronous streaming invoke method"""
123
- result = self.invoke(prompt=prompt, **kwargs)
124
- yield result
 
 
 
 
 
125
 
126
  def parse_provider_response(self, response: str) -> str:
127
  """Parse the provider response"""
@@ -133,32 +156,129 @@ class LocalHuggingFaceModel(Model):
133
 
134
  async def aresponse(self, prompt=None, **kwargs):
135
  """Async response method - required abstract method"""
136
- if prompt is None:
137
- prompt = kwargs.get('input', '')
138
- content = await self.ainvoke(prompt=prompt, **kwargs)
139
- return Response(content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
141
  async def aresponse_stream(self, prompt=None, **kwargs):
142
  """Async streaming response method - required abstract method"""
143
- if prompt is None:
144
- prompt = kwargs.get('input', '')
145
- async for chunk in self.ainvoke_stream(prompt=prompt, **kwargs):
146
- yield Response(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
  def response(self, prompt=None, **kwargs):
149
  """Synchronous response method - required abstract method"""
150
- if prompt is None:
151
- prompt = kwargs.get('input', '')
152
- content = self.invoke(prompt=prompt, **kwargs)
153
- return Response(content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
  def response_stream(self, prompt=None, **kwargs):
156
  """Synchronous streaming response method - required abstract method"""
157
- if prompt is None:
158
- prompt = kwargs.get('input', '')
159
- for chunk in self.invoke_stream(prompt=prompt, **kwargs):
160
- yield Response(chunk)
161
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  def generate(self, prompt: str, **kwargs):
163
  try:
164
  inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
@@ -178,7 +298,7 @@ class LocalHuggingFaceModel(Model):
178
 
179
  return decoded_output
180
  except Exception as e:
181
- logging.error(f"Error in local model generation: {str(e)}")
182
  if hasattr(e, 'args') and len(e.args) > 0:
183
  error_message = e.args[0]
184
  else:
@@ -217,31 +337,128 @@ class DummyModel(Model):
217
 
218
  async def aresponse(self, prompt=None, **kwargs):
219
  """Async response method - required abstract method"""
220
- if prompt is None:
221
- prompt = kwargs.get('input', '')
222
- content = await self.ainvoke(prompt=prompt, **kwargs)
223
- return Response(content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
  async def aresponse_stream(self, prompt=None, **kwargs):
226
  """Async streaming response method - required abstract method"""
227
- if prompt is None:
228
- prompt = kwargs.get('input', '')
229
- async for chunk in self.ainvoke_stream(prompt=prompt, **kwargs):
230
- yield Response(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
  def response(self, prompt=None, **kwargs):
233
  """Synchronous response method - required abstract method"""
234
- if prompt is None:
235
- prompt = kwargs.get('input', '')
236
- content = self.invoke(prompt=prompt, **kwargs)
237
- return Response(content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
238
 
239
  def response_stream(self, prompt=None, **kwargs):
240
  """Synchronous streaming response method - required abstract method"""
241
- if prompt is None:
242
- prompt = kwargs.get('input', '')
243
- for chunk in self.invoke_stream(prompt=prompt, **kwargs):
244
- yield Response(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
245
 
246
  class ModelHandler:
247
  def __init__(self):
@@ -329,37 +546,56 @@ class ModelHandler:
329
  add_references=True,
330
  )
331
 
332
- def _format_prompt(self, role, instructions, query):
333
- """Format the prompt for the model"""
334
- # Validate inputs
335
- if not role or not role.strip():
336
- role = "Assistant"
337
- logging.warning("Empty role provided to _format_prompt, using default: 'Assistant'")
338
-
339
- if not instructions or not instructions.strip():
340
- instructions = "Please process the following input."
341
- logging.warning("Empty instructions provided to _format_prompt, using default instructions")
342
 
343
- if not query or not query.strip():
344
- query = "No input provided."
345
- logging.warning("Empty query provided to _format_prompt, using placeholder text")
346
-
347
- # Format the prompt
348
- formatted_prompt = f"""Task: {role}
349
-
350
- Instructions:
351
- {instructions}
352
-
353
- Input: {query}
354
-
355
- Output:"""
356
-
357
- # Ensure the prompt is not empty
358
- if not formatted_prompt or not formatted_prompt.strip():
359
- logging.error("Generated an empty prompt despite validation")
360
- formatted_prompt = "Please provide a response."
 
 
 
361
 
362
- return formatted_prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
 
364
  @staticmethod
365
  @st.cache_resource
@@ -428,189 +664,80 @@ Output:"""
428
  return LocalHuggingFaceModel(self.model, self.tokenizer, max_length=512)
429
 
430
  def generate_answer(self, query: str) -> str:
431
- try:
432
- logging.info(f"Generating answer for query: {query}")
 
 
 
433
 
434
- # Validate input query
 
 
 
435
  if not query or not query.strip():
436
  logging.error("Empty query provided")
437
- return "Error: Please provide a non-empty query"
438
-
439
- # Check if models are available
440
- if isinstance(self.translator, DummyModel) or isinstance(self.researcher, DummyModel) or \
441
- isinstance(self.summarizer, DummyModel) or isinstance(self.presenter, DummyModel):
442
- logging.error("One or more models are not available")
443
- return """
444
- # 🚨 Serviço Temporariamente Indisponível 🚨
445
-
446
- Desculpe, estamos enfrentando problemas de conexão com nossos serviços de modelo de linguagem.
447
-
448
- ## Possíveis causas:
449
- - Problemas de conexão com a internet
450
- - Servidores do Hugging Face podem estar sobrecarregados ou temporariamente indisponíveis
451
- - Limitações de recursos do sistema
452
-
453
- ## O que você pode fazer:
454
- - Tente novamente mais tarde
455
- - Verifique sua conexão com a internet
456
- - Entre em contato com o suporte se o problema persistir
457
-
458
- Agradecemos sua compreensão!
459
- """
460
-
461
- # Format translation prompt
462
- translation_prompt = self._format_prompt(
463
- role="Translate the following text to English",
464
- instructions="Provide a direct English translation of the input text.",
465
- query=query
466
- )
467
- logging.info(f"Translation prompt: {translation_prompt}")
468
 
469
- # Validate translation prompt
470
- if not translation_prompt or not translation_prompt.strip():
471
- logging.error("Empty translation prompt generated")
472
- return "Error: Unable to generate translation prompt"
473
 
474
- # Get English translation
475
- translation = self.translator.run(translation_prompt, stream=False)
476
- logging.info(f"Translation result type: {type(translation)}")
477
- logging.info(f"Translation result: {translation}")
478
 
479
- if not translation:
480
- logging.error("Translation failed")
481
- return "Error: Unable to translate the query"
482
 
483
- if hasattr(translation, 'content'):
484
- translation_content = translation.content
 
 
 
 
485
  logging.info(f"Translation content: {translation_content}")
486
- else:
487
- translation_content = str(translation)
488
- logging.info(f"Translation as string: {translation_content}")
489
-
490
- # Validate translation content
491
- if not translation_content or not translation_content.strip():
492
- logging.error("Empty translation content")
493
- return "Error: Empty translation result"
494
-
495
- # Format research prompt
496
- research_prompt = self._format_prompt(
497
- role="Research Assistant",
498
- instructions="Provide a clear and concise answer based on scientific sources.",
499
- query=translation_content
500
- )
501
- logging.info(f"Research prompt: {research_prompt}")
502
-
503
- # Validate research prompt
504
- if not research_prompt or not research_prompt.strip():
505
- logging.error("Empty research prompt generated")
506
- return "Error: Unable to generate research prompt"
507
-
508
- # Get research results
509
- research_results = self.researcher.run(research_prompt, stream=False)
510
- logging.info(f"Research results type: {type(research_results)}")
511
- logging.info(f"Research results: {research_results}")
512
-
513
- if not research_results:
514
- logging.error("Research failed")
515
- return "Error: Unable to perform research"
516
-
517
- if hasattr(research_results, 'content'):
518
- research_content = research_results.content
519
  logging.info(f"Research content: {research_content}")
520
- else:
521
- research_content = str(research_results)
522
- logging.info(f"Research as string: {research_content}")
523
-
524
- # Validate research content
525
- if not research_content or not research_content.strip():
526
- logging.error("Empty research content")
527
- return "Error: Empty research result"
528
-
529
- logging.info(f"Research results: {research_results}")
530
-
531
- # Format summary prompt
532
- summary_prompt = self._format_prompt(
533
- role="Summary Assistant",
534
- instructions="Provide a clear and concise summary of the research results.",
535
- query=research_content
536
- )
537
- logging.info(f"Summary prompt: {summary_prompt}")
538
-
539
- # Validate summary prompt
540
- if not summary_prompt or not summary_prompt.strip():
541
- logging.error("Empty summary prompt generated")
542
- return "Error: Unable to generate summary prompt"
543
-
544
- # Get summary
545
- summary = self.summarizer.run(summary_prompt, stream=False)
546
- logging.info(f"Summary type: {type(summary)}")
547
- logging.info(f"Summary: {summary}")
548
-
549
- if not summary:
550
- logging.error("Summary failed")
551
- return "Error: Unable to generate summary"
552
-
553
- if hasattr(summary, 'content'):
554
- summary_content = summary.content
555
- logging.info(f"Summary content: {summary_content}")
556
- else:
557
- summary_content = str(summary)
558
- logging.info(f"Summary as string: {summary_content}")
559
-
560
- # Validate summary content
561
- if not summary_content or not summary_content.strip():
562
- logging.error("Empty summary content")
563
- return "Error: Empty summary result"
564
-
565
- logging.info(f"Summary: {summary}")
566
-
567
- # Format presentation prompt
568
- presentation_prompt = self._format_prompt(
569
- role="Presentation Assistant",
570
- instructions="Provide a clear and concise presentation of the research results.",
571
- query=summary_content
572
- )
573
- logging.info(f"Presentation prompt: {presentation_prompt}")
574
-
575
- # Validate presentation prompt
576
- if not presentation_prompt or not presentation_prompt.strip():
577
- logging.error("Empty presentation prompt generated")
578
- return "Error: Unable to generate presentation prompt"
579
-
580
- # Get presentation
581
- presentation = self.presenter.run(presentation_prompt, stream=False)
582
- logging.info(f"Presentation type: {type(presentation)}")
583
- logging.info(f"Presentation: {presentation}")
584
-
585
- if not presentation:
586
- logging.error("Presentation failed")
587
- return "Error: Unable to generate presentation"
588
-
589
- if hasattr(presentation, 'content'):
590
- presentation_content = presentation.content
591
  logging.info(f"Presentation content: {presentation_content}")
592
 
593
- # Check if content is empty or just whitespace
594
- if not presentation_content.strip():
595
- logging.error("Presentation content is empty or whitespace")
596
- return "Error: Empty presentation content"
597
 
 
598
  return presentation_content
599
- else:
600
- presentation_str = str(presentation)
601
- logging.info(f"Presentation as string: {presentation_str}")
602
 
603
- # Check if content is empty or just whitespace
604
- if not presentation_str.strip():
605
- logging.error("Presentation string is empty or whitespace")
606
- return "Error: Empty presentation string"
607
 
608
- return presentation_str
609
-
610
  except Exception as e:
611
- logging.error(f"Error generating answer: {str(e)}")
612
- if hasattr(e, 'args') and len(e.args) > 0:
613
- error_message = e.args[0]
614
- else:
615
- error_message = str(e)
616
- return f"Error: {error_message}"
 
67
 
68
  async def ainvoke(self, prompt: str, **kwargs) -> str:
69
  """Async invoke method"""
70
+ try:
71
+ logging.info(f"ainvoke called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
72
+ return await self.invoke(prompt, **kwargs)
73
+ except Exception as e:
74
+ logging.error(f"Error in ainvoke: {str(e)}")
75
+ return Response(f"Error in ainvoke: {str(e)}")
76
 
77
  async def ainvoke_stream(self, prompt: str, **kwargs):
78
  """Async streaming invoke method"""
79
+ try:
80
+ logging.info(f"ainvoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
81
+ result = await self.invoke(prompt, **kwargs)
82
+ yield result
83
+ except Exception as e:
84
+ logging.error(f"Error in ainvoke_stream: {str(e)}")
85
+ yield Response(f"Error in ainvoke_stream: {str(e)}")
86
 
87
  def invoke(self, prompt: str, **kwargs) -> str:
88
  """Synchronous invoke method"""
89
  try:
90
+ logging.info(f"Invoking model with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
91
 
92
  # Check if prompt is None or empty
93
  if prompt is None:
94
  logging.warning("None prompt provided to invoke method")
95
  return Response("No input provided. Please provide a valid prompt.")
96
 
97
+ if not isinstance(prompt, str):
98
+ logging.warning(f"Non-string prompt provided: {type(prompt)}")
99
+ try:
100
+ prompt = str(prompt)
101
+ logging.info(f"Converted prompt to string: {prompt[:100]}...")
102
+ except:
103
+ return Response("Invalid input type. Please provide a string prompt.")
104
+
105
  if not prompt.strip():
106
  logging.warning("Empty prompt provided to invoke method")
107
  return Response("No input provided. Please provide a non-empty prompt.")
 
138
 
139
  def invoke_stream(self, prompt: str, **kwargs):
140
  """Synchronous streaming invoke method"""
141
+ try:
142
+ logging.info(f"invoke_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
143
+ result = self.invoke(prompt, **kwargs)
144
+ yield result
145
+ except Exception as e:
146
+ logging.error(f"Error in invoke_stream: {str(e)}")
147
+ yield Response(f"Error in invoke_stream: {str(e)}")
148
 
149
  def parse_provider_response(self, response: str) -> str:
150
  """Parse the provider response"""
 
156
 
157
  async def aresponse(self, prompt=None, **kwargs):
158
  """Async response method - required abstract method"""
159
+ try:
160
+ # Log detalhado de todos os argumentos
161
+ logging.info(f"aresponse args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
162
+
163
+ # Extrair o prompt das mensagens se estiverem disponíveis
164
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
165
+ messages = kwargs['messages']
166
+ # Procurar pela mensagem do usuário
167
+ for message in messages:
168
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
169
+ prompt = message.content
170
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
171
+ break
172
+
173
+ # Verificar se o prompt está em kwargs['input']
174
+ if prompt is None:
175
+ if 'input' in kwargs:
176
+ prompt = kwargs.get('input')
177
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
178
+
179
+ logging.info(f"aresponse called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
180
+
181
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
182
+ logging.warning("Empty or invalid prompt in aresponse")
183
+ return Response("No input provided. Please provide a valid prompt.")
184
+
185
+ content = await self.ainvoke(prompt, **kwargs)
186
+ return content if isinstance(content, Response) else Response(content)
187
+ except Exception as e:
188
+ logging.error(f"Error in aresponse: {str(e)}")
189
+ return Response(f"Error in aresponse: {str(e)}")
190
 
191
  async def aresponse_stream(self, prompt=None, **kwargs):
192
  """Async streaming response method - required abstract method"""
193
+ try:
194
+ # Verificar se o prompt está em kwargs['input']
195
+ if prompt is None:
196
+ if 'input' in kwargs:
197
+ prompt = kwargs.get('input')
198
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
199
+
200
+ logging.info(f"aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
201
+
202
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
203
+ logging.warning("Empty or invalid prompt in aresponse_stream")
204
+ yield Response("No input provided. Please provide a valid prompt.")
205
+ return
206
+
207
+ async for chunk in self.ainvoke_stream(prompt, **kwargs):
208
+ yield chunk if isinstance(chunk, Response) else Response(chunk)
209
+ except Exception as e:
210
+ logging.error(f"Error in aresponse_stream: {str(e)}")
211
+ yield Response(f"Error in aresponse_stream: {str(e)}")
212
 
213
  def response(self, prompt=None, **kwargs):
214
  """Synchronous response method - required abstract method"""
215
+ try:
216
+ # Log detalhado de todos os argumentos
217
+ logging.info(f"response args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
218
+
219
+ # Extrair o prompt das mensagens se estiverem disponíveis
220
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
221
+ messages = kwargs['messages']
222
+ # Procurar pela mensagem do usuário
223
+ for message in messages:
224
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
225
+ prompt = message.content
226
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
227
+ break
228
+
229
+ # Verificar se o prompt está em kwargs['input']
230
+ if prompt is None:
231
+ if 'input' in kwargs:
232
+ prompt = kwargs.get('input')
233
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
234
+
235
+ logging.info(f"response called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
236
+
237
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
238
+ logging.warning("Empty or invalid prompt in response")
239
+ return Response("No input provided. Please provide a valid prompt.")
240
+
241
+ content = self.invoke(prompt, **kwargs)
242
+ return content if isinstance(content, Response) else Response(content)
243
+ except Exception as e:
244
+ logging.error(f"Error in response: {str(e)}")
245
+ return Response(f"Error in response: {str(e)}")
246
 
247
  def response_stream(self, prompt=None, **kwargs):
248
  """Synchronous streaming response method - required abstract method"""
249
+ try:
250
+ # Log detalhado de todos os argumentos
251
+ logging.info(f"response_stream args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
252
+
253
+ # Extrair o prompt das mensagens se estiverem disponíveis
254
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
255
+ messages = kwargs['messages']
256
+ # Procurar pela mensagem do usuário
257
+ for message in messages:
258
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
259
+ prompt = message.content
260
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
261
+ break
262
+
263
+ # Verificar se o prompt está em kwargs['input']
264
+ if prompt is None:
265
+ if 'input' in kwargs:
266
+ prompt = kwargs.get('input')
267
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
268
+
269
+ logging.info(f"response_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
270
+
271
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
272
+ logging.warning("Empty or invalid prompt in response_stream")
273
+ yield Response("No input provided. Please provide a valid prompt.")
274
+ return
275
+
276
+ for chunk in self.invoke_stream(prompt, **kwargs):
277
+ yield chunk if isinstance(chunk, Response) else Response(chunk)
278
+ except Exception as e:
279
+ logging.error(f"Error in response_stream: {str(e)}")
280
+ yield Response(f"Error in response_stream: {str(e)}")
281
+
282
  def generate(self, prompt: str, **kwargs):
283
  try:
284
  inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
 
298
 
299
  return decoded_output
300
  except Exception as e:
301
+ logging.error(f"Error in generate method: {str(e)}")
302
  if hasattr(e, 'args') and len(e.args) > 0:
303
  error_message = e.args[0]
304
  else:
 
337
 
338
  async def aresponse(self, prompt=None, **kwargs):
339
  """Async response method - required abstract method"""
340
+ try:
341
+ # Log detalhado de todos os argumentos
342
+ logging.info(f"aresponse args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
343
+
344
+ # Extrair o prompt das mensagens se estiverem disponíveis
345
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
346
+ messages = kwargs['messages']
347
+ # Procurar pela mensagem do usuário
348
+ for message in messages:
349
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
350
+ prompt = message.content
351
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
352
+ break
353
+
354
+ # Verificar se o prompt está em kwargs['input']
355
+ if prompt is None:
356
+ if 'input' in kwargs:
357
+ prompt = kwargs.get('input')
358
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
359
+
360
+ logging.info(f"aresponse called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
361
+
362
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
363
+ logging.warning("Empty or invalid prompt in aresponse")
364
+ return Response("No input provided. Please provide a valid prompt.")
365
+
366
+ content = await self.ainvoke(prompt, **kwargs)
367
+ return content if isinstance(content, Response) else Response(content)
368
+ except Exception as e:
369
+ logging.error(f"Error in aresponse: {str(e)}")
370
+ return Response(f"Error in aresponse: {str(e)}")
371
 
372
  async def aresponse_stream(self, prompt=None, **kwargs):
373
  """Async streaming response method - required abstract method"""
374
+ try:
375
+ # Verificar se o prompt está em kwargs['input']
376
+ if prompt is None:
377
+ if 'input' in kwargs:
378
+ prompt = kwargs.get('input')
379
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
380
+
381
+ logging.info(f"aresponse_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
382
+
383
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
384
+ logging.warning("Empty or invalid prompt in aresponse_stream")
385
+ yield Response("No input provided. Please provide a valid prompt.")
386
+ return
387
+
388
+ async for chunk in self.ainvoke_stream(prompt, **kwargs):
389
+ yield chunk if isinstance(chunk, Response) else Response(chunk)
390
+ except Exception as e:
391
+ logging.error(f"Error in aresponse_stream: {str(e)}")
392
+ yield Response(f"Error in aresponse_stream: {str(e)}")
393
 
394
  def response(self, prompt=None, **kwargs):
395
  """Synchronous response method - required abstract method"""
396
+ try:
397
+ # Log detalhado de todos os argumentos
398
+ logging.info(f"response args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
399
+
400
+ # Extrair o prompt das mensagens se estiverem disponíveis
401
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
402
+ messages = kwargs['messages']
403
+ # Procurar pela mensagem do usuário
404
+ for message in messages:
405
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
406
+ prompt = message.content
407
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
408
+ break
409
+
410
+ # Verificar se o prompt está em kwargs['input']
411
+ if prompt is None:
412
+ if 'input' in kwargs:
413
+ prompt = kwargs.get('input')
414
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
415
+
416
+ logging.info(f"response called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
417
+
418
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
419
+ logging.warning("Empty or invalid prompt in response")
420
+ return Response("No input provided. Please provide a valid prompt.")
421
+
422
+ content = self.invoke(prompt, **kwargs)
423
+ return content if isinstance(content, Response) else Response(content)
424
+ except Exception as e:
425
+ logging.error(f"Error in response: {str(e)}")
426
+ return Response(f"Error in response: {str(e)}")
427
 
428
  def response_stream(self, prompt=None, **kwargs):
429
  """Synchronous streaming response method - required abstract method"""
430
+ try:
431
+ # Log detalhado de todos os argumentos
432
+ logging.info(f"response_stream args: prompt={prompt}, kwargs keys={list(kwargs.keys())}")
433
+
434
+ # Extrair o prompt das mensagens se estiverem disponíveis
435
+ if prompt is None and 'messages' in kwargs and kwargs['messages']:
436
+ messages = kwargs['messages']
437
+ # Procurar pela mensagem do usuário
438
+ for message in messages:
439
+ if hasattr(message, 'role') and message.role == 'user' and hasattr(message, 'content'):
440
+ prompt = message.content
441
+ logging.info(f"Extracted prompt from user message: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
442
+ break
443
+
444
+ # Verificar se o prompt está em kwargs['input']
445
+ if prompt is None:
446
+ if 'input' in kwargs:
447
+ prompt = kwargs.get('input')
448
+ logging.info(f"Found prompt in kwargs['input']: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}")
449
+
450
+ logging.info(f"response_stream called with prompt: {prompt[:100] if prompt and isinstance(prompt, str) else 'None'}...")
451
+
452
+ if not prompt or not isinstance(prompt, str) or not prompt.strip():
453
+ logging.warning("Empty or invalid prompt in response_stream")
454
+ yield Response("No input provided. Please provide a valid prompt.")
455
+ return
456
+
457
+ for chunk in self.invoke_stream(prompt, **kwargs):
458
+ yield chunk if isinstance(chunk, Response) else Response(chunk)
459
+ except Exception as e:
460
+ logging.error(f"Error in response_stream: {str(e)}")
461
+ yield Response(f"Error in response_stream: {str(e)}")
462
 
463
  class ModelHandler:
464
  def __init__(self):
 
546
  add_references=True,
547
  )
548
 
549
+ def _extract_content(self, result):
550
+ """
551
+ Extrai o conteúdo de uma resposta do modelo.
 
 
 
 
 
 
 
552
 
553
+ Args:
554
+ result: A resposta do modelo, que pode ser um objeto RunResponse ou uma string
555
+
556
+ Returns:
557
+ O conteúdo da resposta como string
558
+ """
559
+ try:
560
+ if result is None:
561
+ return ""
562
+
563
+ if hasattr(result, 'content'):
564
+ return result.content
565
+
566
+ return str(result)
567
+ except Exception as e:
568
+ logging.error(f"Error extracting content: {str(e)}")
569
+ return ""
570
+
571
+ def _format_prompt(self, prompt_type, query):
572
+ """
573
+ Formata um prompt para o modelo com base no tipo de prompt e na consulta.
574
 
575
+ Args:
576
+ prompt_type: O tipo de prompt (translation, research, presentation)
577
+ query: A consulta do usuário ou o resultado de uma etapa anterior
578
+
579
+ Returns:
580
+ Um prompt formatado
581
+ """
582
+ try:
583
+ if not query or not query.strip():
584
+ logging.warning(f"Empty query provided to _format_prompt for {prompt_type}")
585
+ return ""
586
+
587
+ if prompt_type == "translation":
588
+ return f"Task: Translate the following text to English\n\nInstructions:\nProvide a direct English translation of the input text.\n\nInput: {query}\n\nOutput:"
589
+ elif prompt_type == "research":
590
+ return f"Task: Research Assistant\n\nInstructions:\nProvide a clear and concise answer based on scientific sources.\n\nInput: {query}\n\nOutput:"
591
+ elif prompt_type == "presentation":
592
+ return f"Task: Presentation Assistant\n\nInstructions:\nProvide a clear and concise presentation of the research results.\n\nInput: {query}\n\nOutput:"
593
+ else:
594
+ logging.warning(f"Unknown prompt type: {prompt_type}")
595
+ return ""
596
+ except Exception as e:
597
+ logging.error(f"Error formatting prompt: {str(e)}")
598
+ return ""
599
 
600
  @staticmethod
601
  @st.cache_resource
 
664
  return LocalHuggingFaceModel(self.model, self.tokenizer, max_length=512)
665
 
666
  def generate_answer(self, query: str) -> str:
667
+ """
668
+ Gera uma resposta baseada na consulta do usuário.
669
+
670
+ Args:
671
+ query: A consulta do usuário
672
 
673
+ Returns:
674
+ Uma resposta formatada
675
+ """
676
+ try:
677
  if not query or not query.strip():
678
  logging.error("Empty query provided")
679
+ return "Erro: Por favor, forneça uma consulta não vazia."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
680
 
681
+ logging.info(f"Generating answer for query: {query}")
 
 
 
682
 
683
+ # Verificar se os modelos estão disponíveis
684
+ if not self.translator or not self.researcher or not self.presenter:
685
+ logging.error("Models not available")
686
+ return "Desculpe, o serviço está temporariamente indisponível. Por favor, tente novamente mais tarde."
687
 
688
+ # Traduzir a consulta para inglês
689
+ translation_prompt = self._format_prompt("translation", query)
690
+ logging.info(f"Translation prompt: {translation_prompt}")
691
 
692
+ try:
693
+ translation_result = self.translator.run(translation_prompt)
694
+ logging.info(f"Translation result type: {type(translation_result)}")
695
+
696
+ # Extrair o conteúdo da resposta
697
+ translation_content = self._extract_content(translation_result)
698
  logging.info(f"Translation content: {translation_content}")
699
+
700
+ if not translation_content or not translation_content.strip():
701
+ logging.error("Empty translation result")
702
+ return "Desculpe, não foi possível processar sua consulta. Por favor, tente novamente com uma pergunta diferente."
703
+
704
+ # Realizar a pesquisa
705
+ research_prompt = self._format_prompt("research", translation_content)
706
+ logging.info(f"Research prompt: {research_prompt}")
707
+
708
+ research_result = self.researcher.run(research_prompt)
709
+ logging.info(f"Research result type: {type(research_result)}")
710
+
711
+ # Extrair o conteúdo da pesquisa
712
+ research_content = self._extract_content(research_result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
713
  logging.info(f"Research content: {research_content}")
714
+
715
+ if not research_content or not research_content.strip():
716
+ logging.error("Empty research result")
717
+ return "Desculpe, não foi possível encontrar informações sobre sua consulta. Por favor, tente novamente com uma pergunta diferente."
718
+
719
+ # Apresentar os resultados
720
+ presentation_prompt = self._format_prompt("presentation", research_content)
721
+ logging.info(f"Presentation prompt: {presentation_prompt}")
722
+
723
+ presentation_result = self.presenter.run(presentation_prompt)
724
+ logging.info(f"Presentation type: {type(presentation_result)}")
725
+
726
+ # Extrair o conteúdo da apresentação
727
+ presentation_content = self._extract_content(presentation_result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
728
  logging.info(f"Presentation content: {presentation_content}")
729
 
730
+ if not presentation_content or not presentation_content.strip():
731
+ logging.error("Empty presentation result")
732
+ return "Desculpe, não foi possível formatar a resposta. Por favor, tente novamente."
 
733
 
734
+ logging.info("Answer generated successfully")
735
  return presentation_content
 
 
 
736
 
737
+ except Exception as e:
738
+ logging.error(f"Error during answer generation: {str(e)}")
739
+ return f"Desculpe, ocorreu um erro ao processar sua consulta: {str(e)}. Por favor, tente novamente mais tarde."
 
740
 
 
 
741
  except Exception as e:
742
+ logging.error(f"Unexpected error in generate_answer: {str(e)}")
743
+ return "Desculpe, ocorreu um erro inesperado. Por favor, tente novamente mais tarde."
 
 
 
 
test_model.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import sys
3
+ from services.model_handler import ModelHandler
4
+
5
+ # Configure logging
6
+ logging.basicConfig(
7
+ level=logging.INFO,
8
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
9
+ handlers=[
10
+ logging.StreamHandler(sys.stdout)
11
+ ]
12
+ )
13
+
14
+ def main():
15
+ """Test the model handler"""
16
+ try:
17
+ # Initialize the model handler
18
+ logging.info("Initializing model handler...")
19
+ model_handler = ModelHandler()
20
+
21
+ # Test query
22
+ test_query = "O que é autismo?"
23
+ logging.info(f"Testing with query: {test_query}")
24
+
25
+ # Generate answer
26
+ answer = model_handler.generate_answer(test_query)
27
+
28
+ # Print the answer
29
+ logging.info("Answer generated successfully")
30
+ print("\n" + "="*50 + "\n")
31
+ print(answer)
32
+ print("\n" + "="*50 + "\n")
33
+
34
+ except Exception as e:
35
+ logging.error(f"Error in test script: {str(e)}")
36
+ import traceback
37
+ traceback.print_exc()
38
+
39
+ if __name__ == "__main__":
40
+ main()