wakeupmh committed on
Commit
b9e2e13
·
1 Parent(s): ed14957

fix: run model

Browse files
services/__pycache__/__init__.cpython-311.pyc CHANGED
Binary files a/services/__pycache__/__init__.cpython-311.pyc and b/services/__pycache__/__init__.cpython-311.pyc differ
 
services/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (167 Bytes). View file
 
services/__pycache__/model_handler.cpython-311.pyc CHANGED
Binary files a/services/__pycache__/model_handler.cpython-311.pyc and b/services/__pycache__/model_handler.cpython-311.pyc differ
 
services/__pycache__/model_handler.cpython-312.pyc ADDED
Binary file (12 kB). View file
 
services/model_handler.py CHANGED
@@ -62,6 +62,24 @@ class LocalHuggingFaceModel(Model):
62
  """Parse the provider response delta for streaming"""
63
  return delta
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  def generate(self, prompt: str, **kwargs):
66
  try:
67
  inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
@@ -214,7 +232,7 @@ Output:"""
214
  )
215
 
216
  # Get English translation
217
- translation = self.translator.run(translation_prompt, stream=False)
218
  if not translation:
219
  logging.error("Translation failed")
220
  return "Error: Unable to translate the query"
@@ -227,7 +245,7 @@ Output:"""
227
  )
228
 
229
  # Get research results
230
- research_results = self.researcher.run(research_prompt, stream=False)
231
  if not research_results:
232
  logging.error("Research failed")
233
  return "Error: Unable to perform research"
 
62
  """Parse the provider response delta for streaming"""
63
  return delta
64
 
65
+ async def aresponse(self, prompt: str, **kwargs) -> str:
66
+ """Async response method - required abstract method"""
67
+ return await self.ainvoke(prompt=prompt, **kwargs)
68
+
69
+ async def aresponse_stream(self, prompt: str, **kwargs):
70
+ """Async streaming response method - required abstract method"""
71
+ async for chunk in self.ainvoke_stream(prompt=prompt, **kwargs):
72
+ yield chunk
73
+
74
+ def response(self, prompt: str, **kwargs) -> str:
75
+ """Synchronous response method - required abstract method"""
76
+ return self.invoke(prompt=prompt, **kwargs)
77
+
78
+ def response_stream(self, prompt: str, **kwargs):
79
+ """Synchronous streaming response method - required abstract method"""
80
+ for chunk in self.invoke_stream(prompt=prompt, **kwargs):
81
+ yield chunk
82
+
83
  def generate(self, prompt: str, **kwargs):
84
  try:
85
  inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
 
232
  )
233
 
234
  # Get English translation
235
+ translation = self.translator.run(prompt=translation_prompt, stream=False)
236
  if not translation:
237
  logging.error("Translation failed")
238
  return "Error: Unable to translate the query"
 
245
  )
246
 
247
  # Get research results
248
+ research_results = self.researcher.run(prompt=research_prompt, stream=False)
249
  if not research_results:
250
  logging.error("Research failed")
251
  return "Error: Unable to perform research"