Trabis committed
Commit 85baf0a · verified · Parent: 0166a8d

Update app.py

Files changed (1):
  app.py  +46 -46
app.py CHANGED
@@ -328,58 +328,23 @@ textarea::placeholder {
 }
 """

-# def process_question(question: str) -> Iterator[str]:
-#     """
-#     Process the question and return a response generator for streaming.
-#     """
-#     if question in question_cache:
-#         yield question_cache[question][0]
-#         return
-
-#     relevant_docs = retriever(question)
-#     context = "\n".join([doc.page_content for doc in relevant_docs])
-
-#     prompt = prompt_template.format_messages(
-#         context=context,
-#         question=question
-#     )
-
-#     full_response = ""
-#     try:
-#         for chunk in llm.stream(prompt):
-#             if isinstance(chunk, str):
-#                 current_chunk = chunk
-#             else:
-#                 current_chunk = chunk.content
-
-#             full_response += current_chunk
-#             yield full_response  # Send the updated response in streaming
-
-#         question_cache[question] = (full_response, context)
-#     except Exception as e:
-#         yield f"Erreur lors du traitement : {str(e)}"
-
-def process_question(question: str) -> tuple[str, list[str]]:
-    # Check cache first
+def process_question(question: str) -> Iterator[str]:
+    """
+    Process the question and return a response generator for streaming.
+    """
     if question in question_cache:
-        return question_cache[question]
+        yield question_cache[question][0]
+        return

-    # Get relevant documents using the retriever
     relevant_docs = retriever(question)
-
-    # Extract the content and sources
-    context = "\n".join(doc.page_content for doc in relevant_docs)
-    sources = [doc.metadata["source"] for doc in relevant_docs]
-    sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
-
-    # Generate the prompt with the context
-    prompt = prompt_template.format(
+    context = "\n".join([doc.page_content for doc in relevant_docs])
+
+    prompt = prompt_template.format_messages(
         context=context,
         question=question
     )

     full_response = ""
-
     try:
         for chunk in llm.stream(prompt):
             if isinstance(chunk, str):
@@ -388,12 +353,47 @@ def process_question(question: str) -> tuple[str, list[str]]:
                 current_chunk = chunk.content

             full_response += current_chunk
-            yield full_response, sources  # Send the updated response in streaming
+            yield full_response  # Send the updated response in streaming

-        question_cache[question,sources] = (full_response, context)
+        question_cache[question] = (full_response, context)
     except Exception as e:
         yield f"Erreur lors du traitement : {str(e)}"

+# def process_question(question: str) -> tuple[str, list[str]]:
+#     # Check cache first
+#     if question in question_cache:
+#         return question_cache[question]
+
+#     # Get relevant documents using the retriever
+#     relevant_docs = retriever(question)
+
+#     # Extract the content and sources
+#     context = "\n".join(doc.page_content for doc in relevant_docs)
+#     sources = [doc.metadata["source"] for doc in relevant_docs]
+#     sources = os.path.splitext(sources[0])[0] if sources else "غير معروف"
+
+#     # Generate the prompt with the context
+#     prompt = prompt_template.format(
+#         context=context,
+#         question=question
+#     )
+
+#     full_response = ""
+
+#     try:
+#         for chunk in llm.stream(prompt):
+#             if isinstance(chunk, str):
+#                 current_chunk = chunk
+#             else:
+#                 current_chunk = chunk.content
+
+#             full_response += current_chunk
+#             yield full_response, sources  # Send the updated response in streaming
+
+#         question_cache[question,sources] = (full_response, context)
+#     except Exception as e:
+#         yield f"Erreur lors du traitement : {str(e)}"
+


 def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
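
Only the signature of gradio_stream appears as diff context, so its body is not part of this commit. Purely as a minimal sketch, assuming question_cache, retriever, llm and prompt_template are defined earlier in app.py as the diff suggests, the re-enabled streaming generator could drive a Gradio chat component along these lines (the gradio_stream body and the demo/txt/chatbot wiring below are illustrative assumptions, not the actual code in app.py):

# A minimal sketch, assuming process_question is the streaming generator
# re-enabled by this commit; the body of gradio_stream is an assumption,
# since only its signature is shown in the diff.
from typing import Iterator

def gradio_stream(question: str, chat_history: list) -> Iterator[list]:
    # Append the new turn with an empty bot reply, then overwrite that reply
    # with each partial response yielded by the streaming generator.
    chat_history = chat_history + [[question, ""]]
    for partial_response in process_question(question):
        chat_history[-1][1] = partial_response
        yield chat_history

# Illustrative Gradio wiring (component names are hypothetical):
# import gradio as gr
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot()
#     txt = gr.Textbox(placeholder="Posez votre question...")
#     txt.submit(gradio_stream, inputs=[txt, chatbot], outputs=chatbot)
# demo.launch()

Because Gradio treats generator callbacks as streaming handlers, each yielded chat_history re-renders the chatbot, which matches the incremental yield full_response pattern in the re-enabled process_question.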