Trabis committed (verified)
Commit cd51cb3 · 1 Parent(s): 8855231

Update app.py

Files changed (1):
  1. app.py +42 -11
app.py CHANGED
@@ -329,21 +329,17 @@ textarea::placeholder {
     """
 
 def process_question(question: str) -> Iterator[str]:
-    """
-    Process the question and return a response generator for streaming.
-    """
     if question in question_cache:
-        yield question_cache[question][0]
+        response, docs = question_cache[question]
+        yield response + "\nSources:\n" + "\n".join([doc.page_content for doc in docs])
         return
-
+
     relevant_docs = retriever(question)
     context = "\n".join([doc.page_content for doc in relevant_docs])
-
     prompt = prompt_template.format_messages(
         context=context,
         question=question
     )
-
     full_response = ""
     try:
         for chunk in llm.stream(prompt):
@@ -351,14 +347,49 @@ def process_question(question: str) -> Iterator[str]:
                 current_chunk = chunk
             else:
                 current_chunk = chunk.content
-
             full_response += current_chunk
-            yield full_response  # Send the updated response in streaming
-
-        question_cache[question] = (full_response, context)
+            # sources = "\n".join(set([doc.metadata.get("source") for doc in relevant_docs]))
+            # sources = [os.path.splitext(source[1])[0] for source in sources]
+            # yield full_response + "\n\n\nالمصادر المحتملة :\n" + "".join(sources)
+            sources = [doc.metadata.get("source") for doc in relevant_docs]
+            sources = list(set([os.path.splitext(source)[0] for source in sources]))
+            yield full_response + "\n\n\nالمصادر المحتملة :\n" + "\n".join(sources)
+            # yield full_response + "\n\n\nالمصادر المحتملة:\n" + "\n".join([doc.metadata.get("source") for doc in relevant_docs])
+        question_cache[question] = (full_response, relevant_docs)
     except Exception as e:
         yield f"Erreur lors du traitement : {str(e)}"
 
+# def process_question(question: str) -> Iterator[str]:
+#     """
+#     Process the question and return a response generator for streaming.
+#     """
+#     if question in question_cache:
+#         yield question_cache[question][0]
+#         return
+
+#     relevant_docs = retriever(question)
+#     context = "\n".join([doc.page_content for doc in relevant_docs])
+
+#     prompt = prompt_template.format_messages(
+#         context=context,
+#         question=question
+#     )
+
+#     full_response = ""
+#     try:
+#         for chunk in llm.stream(prompt):
+#             if isinstance(chunk, str):
+#                 current_chunk = chunk
+#             else:
+#                 current_chunk = chunk.content
+
+#             full_response += current_chunk
+#             yield full_response  # Send the updated response in streaming
+
+#         question_cache[question] = (full_response, context)
+#     except Exception as e:
+#         yield f"Erreur lors du traitement : {str(e)}"
+
 # def process_question(question: str) -> Iterator[str]:
 #     """
 #     Process the question and return a response generator for streaming, including sources.
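
For reference, here is a minimal self-contained sketch of the pattern this commit introduces: the cache now stores the retrieved documents alongside the answer, so a cache hit can replay the response together with its sources, and each streamed yield appends a deduplicated, extension-stripped list of source names. FakeDoc, fake_retriever, and fake_llm_stream are hypothetical stand-ins for the app's real retriever and llm.stream; prompt construction is omitted.

import os
from typing import Dict, Iterator, List, Tuple

class FakeDoc:
    """Hypothetical stand-in for a retrieved document: page_content plus metadata."""
    def __init__(self, page_content: str, source: str):
        self.page_content = page_content
        self.metadata = {"source": source}

def fake_retriever(question: str) -> List[FakeDoc]:
    # Two chunks from the same file plus one from another, to exercise deduplication.
    return [
        FakeDoc("chunk A", "guide.pdf"),
        FakeDoc("chunk B", "guide.pdf"),
        FakeDoc("chunk C", "faq.txt"),
    ]

def fake_llm_stream(question: str) -> Iterator[str]:
    # Stand-in for llm.stream(prompt): yields the answer in pieces.
    yield "Partial "
    yield "answer."

question_cache: Dict[str, Tuple[str, List[FakeDoc]]] = {}

def process_question(question: str) -> Iterator[str]:
    # Cache hit: replay the stored answer together with its source documents.
    if question in question_cache:
        response, docs = question_cache[question]
        yield response + "\nSources:\n" + "\n".join(doc.page_content for doc in docs)
        return

    relevant_docs = fake_retriever(question)
    full_response = ""
    for chunk in fake_llm_stream(question):
        full_response += chunk
        # Deduplicate source names and strip file extensions before appending.
        sources = sorted({os.path.splitext(doc.metadata["source"])[0]
                          for doc in relevant_docs})
        yield full_response + "\n\nSources:\n" + "\n".join(sources)
    # Cache the documents themselves (not just the flattened context string),
    # so a later cache hit can rebuild the sources block.
    question_cache[question] = (full_response, relevant_docs)

if __name__ == "__main__":
    for update in process_question("demo"):   # first call: streamed
        last = update
    print(last)
    print("--- cached ---")
    print(next(process_question("demo")))     # second call: served from cache

Caching relevant_docs instead of the context string, as this commit does, is what lets the cache-hit branch reproduce the sources without re-running retrieval.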