Trabis committed
Commit 0166a8d · verified · 1 Parent(s): 946f51b

Update app.py

Files changed (1): app.py +47 -11
app.py CHANGED
@@ -328,23 +328,58 @@ textarea::placeholder {
 }
 """
 
-def process_question(question: str) -> Iterator[str]:
-    """
-    Process the question and return a response generator for streaming.
-    """
+# def process_question(question: str) -> Iterator[str]:
+#     """
+#     Process the question and return a response generator for streaming.
+#     """
+#     if question in question_cache:
+#         yield question_cache[question][0]
+#         return
+
+#     relevant_docs = retriever(question)
+#     context = "\n".join([doc.page_content for doc in relevant_docs])
+
+#     prompt = prompt_template.format_messages(
+#         context=context,
+#         question=question
+#     )
+
+#     full_response = ""
+#     try:
+#         for chunk in llm.stream(prompt):
+#             if isinstance(chunk, str):
+#                 current_chunk = chunk
+#             else:
+#                 current_chunk = chunk.content
+
+#             full_response += current_chunk
+#             yield full_response  # Send the updated response in streaming
+
+#         question_cache[question] = (full_response, context)
+#     except Exception as e:
+#         yield f"Erreur lors du traitement : {str(e)}"
+
+def process_question(question: str) -> tuple[str, list[str]]:
+    # Check cache first
     if question in question_cache:
-        yield question_cache[question][0]
-        return
+        return question_cache[question]
 
+    # Get relevant documents using the retriever
     relevant_docs = retriever(question)
-    context = "\n".join([doc.page_content for doc in relevant_docs])
-
-    prompt = prompt_template.format_messages(
+
+    # Extract the content and sources
+    context = "\n".join(doc.page_content for doc in relevant_docs)
+    sources = [doc.metadata["source"] for doc in relevant_docs]
+    sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
+
+    # Generate the prompt with the context
+    prompt = prompt_template.format(
         context=context,
         question=question
     )
 
     full_response = ""
+
     try:
         for chunk in llm.stream(prompt):
             if isinstance(chunk, str):
@@ -353,13 +388,14 @@ def process_question(question: str) -> Iterator[str]:
                 current_chunk = chunk.content
 
             full_response += current_chunk
-            yield full_response  # Send the updated response in streaming
+            yield full_response, sources  # Send the updated response in streaming
 
-        question_cache[question] = (full_response, context)
+        question_cache[question,sources] = (full_response, context)
     except Exception as e:
         yield f"Erreur lors du traitement : {str(e)}"
 
 
+
 def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
     """
     Format the output for Gradio Chatbot component with streaming.
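
For reference, a minimal self-contained sketch of the streaming-with-cache pattern the new function follows. The Doc, retriever, StubLLM, and prompt_template stand-ins below are hypothetical, not part of this commit. The sketch also keeps the cache key and the yield shape consistent: as committed, the lookup checks question in question_cache while the store writes to question_cache[question,sources], so the cache can never hit, the early return on a hit would end the generator without emitting anything, and the tuple[str, list[str]] annotation does not match a function that yields.

    import os
    from dataclasses import dataclass, field
    from typing import Iterator

    # --- Hypothetical stand-ins for the app's retriever, LLM, and prompt ---

    @dataclass
    class Doc:
        page_content: str
        metadata: dict = field(default_factory=dict)

    def retriever(question: str) -> list[Doc]:
        # Stub: the real app queries a vector store here.
        return [Doc("Some relevant passage.", {"source": "guide.pdf"})]

    class StubLLM:
        def stream(self, prompt: str) -> Iterator[str]:
            # Stub: yields the answer piecewise, like llm.stream() in the diff.
            yield from ("Réponse ", "en ", "streaming.")

    llm = StubLLM()
    prompt_template = "Contexte :\n{context}\n\nQuestion : {question}"

    # --- Streaming with a cache, keyed by question only ---

    question_cache: dict[str, tuple[str, str]] = {}

    def process_question(question: str) -> Iterator[tuple[str, str]]:
        # Cache hit: yield the stored (answer, source) pair instead of
        # returning it, since callers iterate over this generator.
        if question in question_cache:
            yield question_cache[question]
            return

        relevant_docs = retriever(question)
        context = "\n".join(doc.page_content for doc in relevant_docs)
        sources = [doc.metadata["source"] for doc in relevant_docs]
        source = os.path.splitext(sources[0])[0] if sources else "غير معروف"

        prompt = prompt_template.format(context=context, question=question)

        full_response = ""
        try:
            for chunk in llm.stream(prompt):
                # Chunks may be plain strings or message objects with .content.
                current_chunk = chunk if isinstance(chunk, str) else chunk.content
                full_response += current_chunk
                yield full_response, source  # stream the accumulated answer

            # Store under the same key the lookup above uses.
            question_cache[question] = (full_response, source)
        except Exception as e:
            # Keep the same pair shape as the success path.
            yield f"Erreur lors du traitement : {e}", source

    # Usage: each iteration gives the answer accumulated so far plus its source.
    for partial, source in process_question("Quelle est la procédure ?"):
        print(partial, "|", source)

Yielding the accumulated text rather than each delta matches how gradio_stream appears to consume it when updating the Gradio Chatbot.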